Dataset schema (value ranges as shown in the viewer):

column                    type             min    max
code                      string (length)  86     54.5k
code_codestyle            int64            0      371
style_context             string (length)  87     49.2k
style_context_codestyle   int64            0      349
label                     int64            0      1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) class __lowerCamelCase ( __lowercase ): def __init__(self , *lowerCamelCase , **lowerCamelCase ): '''simple docstring''' warnings.warn( """The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use SegformerImageProcessor instead.""" , lowerCamelCase , ) super().__init__(*lowerCamelCase , **lowerCamelCase )
317
"""simple docstring""" import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device SCREAMING_SNAKE_CASE : List[str] = False class __lowerCamelCase ( unittest.TestCase ): pass @slow @require_torch_gpu class __lowerCamelCase ( unittest.TestCase ): def A__ (self ): '''simple docstring''' _lowerCAmelCase = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) _lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe( image=lowerCamelCase , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images _lowerCAmelCase = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _lowerCAmelCase = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
317
1
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : list[int] , snake_case_ : list[int] , snake_case_ : int ) -> bool: """simple docstring""" return not any( neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(snake_case_ ) ) def __UpperCAmelCase ( snake_case_ : list[list[int]] , snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> bool: """simple docstring""" if index == len(snake_case_ ): return True # Recursive Step for i in range(snake_case_ ): if valid_coloring(graph[index] , snake_case_ , snake_case_ ): # Color current vertex _lowerCAmelCase = i # Validate coloring if util_color(snake_case_ , snake_case_ , snake_case_ , index + 1 ): return True # Backtrack _lowerCAmelCase = -1 return False def __UpperCAmelCase ( snake_case_ : list[list[int]] , snake_case_ : int ) -> list[int]: """simple docstring""" _lowerCAmelCase = [-1] * len(snake_case_ ) if util_color(snake_case_ , snake_case_ , snake_case_ , 0 ): return colored_vertices return []
317
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __lowerCamelCase ( __lowercase , unittest.TestCase ): __UpperCamelCase = DiTPipeline __UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS __UpperCamelCase = PipelineTesterMixin.required_optional_params - { 'latents', 'num_images_per_prompt', 'callback', 'callback_steps', } __UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS __UpperCamelCase = False def A__ (self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCamelCase , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1_000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=lowerCamelCase , ) _lowerCAmelCase = AutoencoderKL() _lowerCAmelCase = DDIMScheduler() _lowerCAmelCase = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler} return components def A__ (self , lowerCamelCase , lowerCamelCase=0 ): '''simple docstring''' if str(lowerCamelCase ).startswith("""mps""" ): _lowerCAmelCase = torch.manual_seed(lowerCamelCase ) else: _lowerCAmelCase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) _lowerCAmelCase = { """class_labels""": [1], """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def A__ (self ): '''simple docstring''' _lowerCAmelCase = """cpu""" _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = self.pipeline_class(**lowerCamelCase ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) _lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase ) _lowerCAmelCase = pipe(**lowerCamelCase ).images _lowerCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _lowerCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) _lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCamelCase , 1e-3 ) def A__ (self ): '''simple docstring''' self._test_inference_batch_single_identical(relax_max_difference=lowerCamelCase , expected_max_diff=1e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def A__ (self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @require_torch_gpu @slow class __lowerCamelCase ( unittest.TestCase ): def A__ (self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ (self ): '''simple docstring''' _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" ) pipe.to("""cuda""" ) _lowerCAmelCase = ["""vase""", """umbrella""", """white shark""", """white wolf"""] _lowerCAmelCase = pipe.get_label_ids(lowerCamelCase ) 
_lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=40 , output_type="""np""" ).images for word, image in zip(lowerCamelCase , lowerCamelCase ): _lowerCAmelCase = load_numpy( f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" ) assert np.abs((expected_image - image).max() ) < 1e-2 def A__ (self ): '''simple docstring''' _lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" ) _lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("""cuda""" ) _lowerCAmelCase = ["""vase""", """umbrella"""] _lowerCAmelCase = pipe.get_label_ids(lowerCamelCase ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=25 , output_type="""np""" ).images for word, image in zip(lowerCamelCase , lowerCamelCase ): _lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" f"""/dit/{word}_512.npy""" ) assert np.abs((expected_image - image).max() ) < 1e-1
317
1
"""simple docstring""" # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import subprocess from packaging.version import Version, parse from accelerate.commands.config.config_args import default_config_file, load_config_from_file SCREAMING_SNAKE_CASE : Optional[int] = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.''' def __UpperCAmelCase ( snake_case_ : List[Any]=None ) -> Any: """simple docstring""" if subparsers is not None: _lowerCAmelCase = subparsers.add_parser("""tpu-config""" , description=_description ) else: _lowerCAmelCase = argparse.ArgumentParser("""Accelerate tpu-config command""" , description=_description ) # Core arguments _lowerCAmelCase = parser.add_argument_group( """Config Arguments""" , """Arguments that can be configured through `accelerate config`.""" ) config_args.add_argument( """--config_file""" , type=snake_case_ , default=snake_case_ , help="""Path to the config file to use for accelerate.""" , ) config_args.add_argument( """--tpu_name""" , default=snake_case_ , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , ) config_args.add_argument( """--tpu_zone""" , default=snake_case_ , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , ) _lowerCAmelCase = parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""" ) pod_args.add_argument( """--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , ) pod_args.add_argument( """--command_file""" , default=snake_case_ , help="""The path to the file containing the commands to run on the pod on startup.""" , ) pod_args.add_argument( """--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , ) pod_args.add_argument( """--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , ) pod_args.add_argument( """--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.""" , ) pod_args.add_argument( """--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" ) if subparsers is not None: parser.set_defaults(func=snake_case_ ) return parser def __UpperCAmelCase ( snake_case_ : Optional[int] ) -> Optional[Any]: """simple docstring""" _lowerCAmelCase = None # Get the default from the config file if it exists. 
if args.config_file is not None or os.path.isfile(snake_case_ ): _lowerCAmelCase = load_config_from_file(args.config_file ) if not args.command_file and defaults.command_file is not None and not args.command: _lowerCAmelCase = defaults.command_file if not args.command and defaults.commands is not None: _lowerCAmelCase = defaults.commands if not args.tpu_name: _lowerCAmelCase = defaults.tpu_name if not args.tpu_zone: _lowerCAmelCase = defaults.tpu_zone if args.accelerate_version == "dev": _lowerCAmelCase = """git+https://github.com/huggingface/accelerate.git""" elif args.accelerate_version == "latest": _lowerCAmelCase = """accelerate -U""" elif isinstance(parse(args.accelerate_version ) , snake_case_ ): _lowerCAmelCase = F"""accelerate=={args.accelerate_version}""" if not args.command_file and not args.command: raise ValueError("""You must specify either a command file or a command to run on the pod.""" ) if args.command_file: with open(args.command_file , """r""" ) as f: _lowerCAmelCase = [f.read().splitlines()] # To turn list of lists into list of strings if isinstance(args.command[0] , snake_case_ ): _lowerCAmelCase = [line for cmd in args.command for line in cmd] # Default to the shared folder and install accelerate _lowerCAmelCase = ["""cd /usr/share"""] if args.install_accelerate: new_cmd += [F"""pip install {args.accelerate_version}"""] new_cmd += args.command _lowerCAmelCase = """; """.join(snake_case_ ) # Then send it to gcloud # Eventually try to use google-api-core to do this instead of subprocess _lowerCAmelCase = ["""gcloud"""] if args.use_alpha: cmd += ["alpha"] cmd += [ "compute", "tpus", "tpu-vm", "ssh", args.tpu_name, "--zone", args.tpu_zone, "--command", args.command, "--worker", "all", ] if args.debug: print(F"""Running {" ".join(snake_case_ )}""" ) return subprocess.run(snake_case_ ) print("""Successfully setup pod.""" ) def __UpperCAmelCase ( ) -> Optional[Any]: """simple docstring""" _lowerCAmelCase = tpu_command_parser() _lowerCAmelCase = parser.parse_args() tpu_command_launcher(snake_case_ )
317
"""simple docstring""" from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> Dict: """simple docstring""" return getitem, k def __UpperCAmelCase ( snake_case_ : Dict , snake_case_ : Union[str, Any] ) -> List[Any]: """simple docstring""" return setitem, k, v def __UpperCAmelCase ( snake_case_ : str ) -> Optional[int]: """simple docstring""" return delitem, k def __UpperCAmelCase ( snake_case_ : Optional[Any] , snake_case_ : Tuple , *snake_case_ : Tuple ) -> str: """simple docstring""" try: return fun(snake_case_ , *snake_case_ ), None except Exception as e: return None, e SCREAMING_SNAKE_CASE : int = ( _set('''key_a''', '''val_a'''), _set('''key_b''', '''val_b'''), ) SCREAMING_SNAKE_CASE : List[Any] = [ _set('''key_a''', '''val_a'''), _set('''key_a''', '''val_b'''), ] SCREAMING_SNAKE_CASE : Any = [ _set('''key_a''', '''val_a'''), _set('''key_b''', '''val_b'''), _del('''key_a'''), _del('''key_b'''), _set('''key_a''', '''val_a'''), _del('''key_a'''), ] SCREAMING_SNAKE_CASE : Union[str, Any] = [ _get('''key_a'''), _del('''key_a'''), _set('''key_a''', '''val_a'''), _del('''key_a'''), _del('''key_a'''), _get('''key_a'''), ] SCREAMING_SNAKE_CASE : Optional[Any] = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] SCREAMING_SNAKE_CASE : Optional[int] = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set('''key_a''', '''val_b'''), ] @pytest.mark.parametrize( """operations""" , ( pytest.param(_add_items , id="""add items""" ), pytest.param(_overwrite_items , id="""overwrite items""" ), pytest.param(_delete_items , id="""delete items""" ), pytest.param(_access_absent_items , id="""access absent items""" ), pytest.param(_add_with_resize_up , id="""add with resize up""" ), pytest.param(_add_with_resize_down , id="""add with resize down""" ), ) , ) def __UpperCAmelCase ( snake_case_ : List[Any] ) -> Tuple: """simple docstring""" _lowerCAmelCase = HashMap(initial_block_size=4 ) _lowerCAmelCase = {} for _, (fun, *args) in enumerate(snake_case_ ): _lowerCAmelCase , _lowerCAmelCase = _run_operation(snake_case_ , snake_case_ , *snake_case_ ) _lowerCAmelCase , _lowerCAmelCase = _run_operation(snake_case_ , snake_case_ , *snake_case_ ) assert my_res == py_res assert str(snake_case_ ) == str(snake_case_ ) assert set(snake_case_ ) == set(snake_case_ ) assert len(snake_case_ ) == len(snake_case_ ) assert set(my.items() ) == set(py.items() ) def __UpperCAmelCase ( ) -> Tuple: """simple docstring""" def is_public(snake_case_ : str ) -> bool: return not name.startswith("""_""" ) _lowerCAmelCase = {name for name in dir({} ) if is_public(snake_case_ )} _lowerCAmelCase = {name for name in dir(HashMap() ) if is_public(snake_case_ )} assert dict_public_names > hash_public_names
317
1
"""simple docstring""" # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib SCREAMING_SNAKE_CASE : Union[str, Any] = get_logger() SCREAMING_SNAKE_CASE : Optional[dict] = None class __lowerCamelCase ( TensorFormatter[Mapping, 'jax.Array', Mapping] ): def __init__(self , lowerCamelCase=None , lowerCamelCase=None , **lowerCamelCase ): '''simple docstring''' super().__init__(features=lowerCamelCase ) import jax from jaxlib.xla_client import Device if isinstance(lowerCamelCase , lowerCamelCase ): raise ValueError( f"""Expected {device} to be a `str` not {type(lowerCamelCase )}, as `jaxlib.xla_extension.Device` """ """is not serializable neither with `pickle` nor with `dill`. Instead you can surround """ """the device with `str()` to get its string identifier that will be internally mapped """ """to the actual `jaxlib.xla_extension.Device`.""" ) _lowerCAmelCase = device if isinstance(lowerCamelCase , lowerCamelCase ) else str(jax.devices()[0] ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: _lowerCAmelCase = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys() ): logger.warning( f"""Device with string identifier {self.device} not listed among the available """ f"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """ f"""device: {str(jax.devices()[0] )}.""" ) _lowerCAmelCase = str(jax.devices()[0] ) _lowerCAmelCase = jnp_array_kwargs @staticmethod def A__ (): '''simple docstring''' import jax return {str(lowerCamelCase ): device for device in jax.devices()} def A__ (self , lowerCamelCase ): '''simple docstring''' import jax import jax.numpy as jnp if isinstance(lowerCamelCase , lowerCamelCase ) and column: if all( isinstance(lowerCamelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return jnp.stack(lowerCamelCase , axis=0 ) return column def A__ (self , lowerCamelCase ): '''simple docstring''' import jax import jax.numpy as jnp if isinstance(lowerCamelCase , (str, bytes, type(lowerCamelCase )) ): return value elif isinstance(lowerCamelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() _lowerCAmelCase = {} if isinstance(lowerCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: _lowerCAmelCase = {"""dtype""": jnp.intaa} else: _lowerCAmelCase = {"""dtype""": jnp.intaa} elif isinstance(lowerCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): _lowerCAmelCase = {"""dtype""": jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(lowerCamelCase , PIL.Image.Image ): _lowerCAmelCase = np.asarray(lowerCamelCase ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: 
_lowerCAmelCase = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device] ): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return jnp.array(lowerCamelCase , **{**default_dtype, **self.jnp_array_kwargs} ) def A__ (self , lowerCamelCase ): '''simple docstring''' import jax # support for torch, tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(lowerCamelCase , torch.Tensor ): return self._tensorize(data_struct.detach().cpu().numpy()[()] ) if hasattr(lowerCamelCase , """__array__""" ) and not isinstance(lowerCamelCase , jax.Array ): _lowerCAmelCase = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(lowerCamelCase , np.ndarray ): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(lowerCamelCase ) for substruct in data_struct] ) elif isinstance(lowerCamelCase , (list, tuple) ): return self._consolidate([self.recursive_tensorize(lowerCamelCase ) for substruct in data_struct] ) return self._tensorize(lowerCamelCase ) def A__ (self , lowerCamelCase ): '''simple docstring''' return map_nested(self._recursive_tensorize , lowerCamelCase , map_list=lowerCamelCase ) def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.numpy_arrow_extractor().extract_row(lowerCamelCase ) _lowerCAmelCase = self.python_features_decoder.decode_row(lowerCamelCase ) return self.recursive_tensorize(lowerCamelCase ) def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.numpy_arrow_extractor().extract_column(lowerCamelCase ) _lowerCAmelCase = self.python_features_decoder.decode_column(lowerCamelCase , pa_table.column_names[0] ) _lowerCAmelCase = self.recursive_tensorize(lowerCamelCase ) _lowerCAmelCase = self._consolidate(lowerCamelCase ) return column def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.numpy_arrow_extractor().extract_batch(lowerCamelCase ) _lowerCAmelCase = self.python_features_decoder.decode_batch(lowerCamelCase ) _lowerCAmelCase = self.recursive_tensorize(lowerCamelCase ) for column_name in batch: _lowerCAmelCase = self._consolidate(batch[column_name] ) return batch
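The formatter in the row above keys its default integer dtype off JAX's x64 flag (mangled to `jax_enable_xaa` in this dump; the real flag is `jax_enable_x64`, and the mangled `jnp.intaa`/`jnp.floataa` stand for `jnp.int64`/`jnp.int32`/`jnp.float32`). A minimal sketch of the behaviour it accounts for, assuming a fresh process:

import jax
import jax.numpy as jnp

# by default JAX disables 64-bit types, so integer arrays come back as int32
print(jnp.array([1, 2, 3]).dtype)  # int32

# enabling x64 (ideally before any array is created) switches the default
jax.config.update("jax_enable_x64", True)
print(jnp.array([1, 2, 3]).dtype)  # int64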
317
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int: """simple docstring""" def count_of_possible_combinations(snake_case_ : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(snake_case_ ) def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int: """simple docstring""" def count_of_possible_combinations_with_dp_array( snake_case_ : int , snake_case_ : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] _lowerCAmelCase = sum( count_of_possible_combinations_with_dp_array(target - item , snake_case_ ) for item in array ) _lowerCAmelCase = answer return answer _lowerCAmelCase = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(snake_case_ , snake_case_ ) def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int: """simple docstring""" _lowerCAmelCase = [0] * (target + 1) _lowerCAmelCase = 1 for i in range(1 , target + 1 ): for j in range(snake_case_ ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE : Tuple = 3 SCREAMING_SNAKE_CASE : Any = 5 SCREAMING_SNAKE_CASE : Optional[int] = [1, 2, 5] print(combination_sum_iv(n, array, target))
317
1
"""simple docstring""" from math import asin, atan, cos, radians, sin, sqrt, tan SCREAMING_SNAKE_CASE : str = 6_3_7_8_1_3_7.0 SCREAMING_SNAKE_CASE : Optional[Any] = 6_3_5_6_7_5_2.3_1_4_2_4_5 SCREAMING_SNAKE_CASE : Any = 6_3_7_8_1_3_7 def __UpperCAmelCase ( snake_case_ : float , snake_case_ : float , snake_case_ : float , snake_case_ : float ) -> float: """simple docstring""" _lowerCAmelCase = (AXIS_A - AXIS_B) / AXIS_A _lowerCAmelCase = atan((1 - flattening) * tan(radians(snake_case_ ) ) ) _lowerCAmelCase = atan((1 - flattening) * tan(radians(snake_case_ ) ) ) _lowerCAmelCase = radians(snake_case_ ) _lowerCAmelCase = radians(snake_case_ ) # Equation _lowerCAmelCase = sin((phi_a - phi_a) / 2 ) _lowerCAmelCase = sin((lambda_a - lambda_a) / 2 ) # Square both values sin_sq_phi *= sin_sq_phi sin_sq_lambda *= sin_sq_lambda _lowerCAmelCase = sqrt(sin_sq_phi + (cos(snake_case_ ) * cos(snake_case_ ) * sin_sq_lambda) ) return 2 * RADIUS * asin(snake_case_ ) if __name__ == "__main__": import doctest doctest.testmod()
317
"""simple docstring""" from __future__ import annotations import string from itertools import cycle, product from pathlib import Path SCREAMING_SNAKE_CASE : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) SCREAMING_SNAKE_CASE : list[int] = [ord(letter) for letter in string.ascii_lowercase] SCREAMING_SNAKE_CASE : set[int] = {ord(char) for char in VALID_CHARS} SCREAMING_SNAKE_CASE : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def __UpperCAmelCase ( snake_case_ : list[int] , snake_case_ : tuple[int, ...] ) -> str | None: """simple docstring""" _lowerCAmelCase = "" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 for keychar, cipherchar in zip(cycle(snake_case_ ) , snake_case_ ): _lowerCAmelCase = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(snake_case_ ) return decoded def __UpperCAmelCase ( snake_case_ : list[int] ) -> list[str]: """simple docstring""" _lowerCAmelCase = [] for key in product(snake_case_ , repeat=3 ): _lowerCAmelCase = try_key(snake_case_ , snake_case_ ) if encoded is not None: possibles.append(snake_case_ ) return possibles def __UpperCAmelCase ( snake_case_ : list[str] , snake_case_ : str ) -> list[str]: """simple docstring""" return [possible for possible in possibles if common_word in possible.lower()] def __UpperCAmelCase ( snake_case_ : str = "p059_cipher.txt" ) -> int: """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = Path(snake_case_ ).parent.joinpath(snake_case_ ).read_text(encoding="""utf-8""" ) _lowerCAmelCase = [int(snake_case_ ) for number in data.strip().split(""",""" )] _lowerCAmelCase = filter_valid_chars(snake_case_ ) for common_word in COMMON_WORDS: _lowerCAmelCase = filter_common_word(snake_case_ , snake_case_ ) if len(snake_case_ ) == 1: break _lowerCAmelCase = possibles[0] return sum(ord(snake_case_ ) for char in decoded_text ) if __name__ == "__main__": print(F'{solution() = }')
317
1
"""simple docstring""" import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class __lowerCamelCase : def __init__(self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=64 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=3 , lowerCamelCase=4 , lowerCamelCase=None , ): '''simple docstring''' _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = seq_length _lowerCAmelCase = is_training _lowerCAmelCase = use_input_mask _lowerCAmelCase = use_token_type_ids _lowerCAmelCase = use_labels _lowerCAmelCase = vocab_size _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = type_vocab_size _lowerCAmelCase = type_sequence_label_size _lowerCAmelCase = initializer_range _lowerCAmelCase = num_labels _lowerCAmelCase = num_choices _lowerCAmelCase = scope _lowerCAmelCase = vocab_size - 1 def A__ (self ): '''simple docstring''' _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCAmelCase = None if self.use_input_mask: _lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase = None if self.use_labels: _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCAmelCase = self.get_config() return config, input_ids, input_mask, token_labels def A__ (self ): '''simple docstring''' return GPTNeoXConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , ) def A__ (self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.prepare_config_and_inputs() _lowerCAmelCase = True return config, input_ids, input_mask, token_labels def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = GPTNeoXModel(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() _lowerCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase ) _lowerCAmelCase = model(lowerCamelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = True _lowerCAmelCase = GPTNeoXModel(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() _lowerCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = GPTNeoXForCausalLM(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() _lowerCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.num_labels _lowerCAmelCase = GPTNeoXForQuestionAnswering(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() _lowerCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.num_labels _lowerCAmelCase = GPTNeoXForSequenceClassification(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() _lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.num_labels _lowerCAmelCase = GPTNeoXForTokenClassification(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() _lowerCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = True _lowerCAmelCase = GPTNeoXForCausalLM(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() # first forward pass _lowerCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase , use_cache=lowerCamelCase ) _lowerCAmelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids _lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) _lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and _lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) _lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 ) _lowerCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase , output_hidden_states=lowerCamelCase ) _lowerCAmelCase = output_from_no_past["""hidden_states"""][0] _lowerCAmelCase = model( lowerCamelCase , attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , output_hidden_states=lowerCamelCase , )["""hidden_states"""][0] # select random slice _lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() _lowerCAmelCase = 
output_from_no_past[:, -3:, random_slice_idx].detach() _lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1e-3 ) ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs _lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __lowerCamelCase ( __lowercase , __lowercase , __lowercase , unittest.TestCase ): __UpperCamelCase = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) __UpperCamelCase = (GPTNeoXForCausalLM,) if is_torch_available() else () __UpperCamelCase = ( { 'feature-extraction': GPTNeoXModel, 'question-answering': GPTNeoXForQuestionAnswering, 'text-classification': GPTNeoXForSequenceClassification, 'text-generation': GPTNeoXForCausalLM, 'token-classification': GPTNeoXForTokenClassification, 'zero-shot': GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def A__ (self ): '''simple docstring''' _lowerCAmelCase = GPTNeoXModelTester(self ) _lowerCAmelCase = ConfigTester(self , config_class=lowerCamelCase , hidden_size=64 , num_attention_heads=8 ) def A__ (self ): '''simple docstring''' self.config_tester.run_common_tests() def A__ (self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder() _lowerCAmelCase = None self.model_tester.create_and_check_model_as_decoder(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCamelCase ) @unittest.skip(reason="""Feed forward 
chunking is not implemented""" ) def A__ (self ): '''simple docstring''' pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase = ids_tensor([1, 10] , config.vocab_size ) _lowerCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights _lowerCAmelCase = GPTNeoXModel(lowerCamelCase ) original_model.to(lowerCamelCase ) original_model.eval() _lowerCAmelCase = original_model(lowerCamelCase ).last_hidden_state _lowerCAmelCase = original_model(lowerCamelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights _lowerCAmelCase = {"""type""": scaling_type, """factor""": 10.0} _lowerCAmelCase = GPTNeoXModel(lowerCamelCase ) scaled_model.to(lowerCamelCase ) scaled_model.eval() _lowerCAmelCase = scaled_model(lowerCamelCase ).last_hidden_state _lowerCAmelCase = scaled_model(lowerCamelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1e-5 ) ) @require_torch class __lowerCamelCase ( unittest.TestCase ): @slow def A__ (self ): '''simple docstring''' _lowerCAmelCase = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) for checkpointing in [True, False]: _lowerCAmelCase = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(lowerCamelCase ) _lowerCAmelCase = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(lowerCamelCase ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 _lowerCAmelCase = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure""" _lowerCAmelCase = model.generate(**lowerCamelCase , do_sample=lowerCamelCase , max_new_tokens=20 ) _lowerCAmelCase = tokenizer.batch_decode(lowerCamelCase )[0] self.assertEqual(lowerCamelCase , lowerCamelCase )
317
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : int = 1000000 ) -> int: """simple docstring""" _lowerCAmelCase = limit + 1 _lowerCAmelCase = [0] * limit for first_term in range(1 , snake_case_ ): for n in range(snake_case_ , snake_case_ , snake_case_ ): _lowerCAmelCase = first_term + n / first_term if common_difference % 4: # d must be divisble by 4 continue else: common_difference /= 4 if ( first_term > common_difference and first_term < 4 * common_difference ): # since x,y,z are positive integers frequency[n] += 1 # so z>0 and a>d ,also 4d<a _lowerCAmelCase = sum(1 for x in frequency[1:limit] if x == 10 ) return count if __name__ == "__main__": print(F'{solution() = }')
317
1
"""simple docstring""" from functools import reduce SCREAMING_SNAKE_CASE : int = ( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715693290963295227443043557''' '''66896648950445244523161731856403098711121722383113''' '''62229893423380308135336276614282806444486645238749''' '''30358907296290491560440772390713810515859307960866''' '''70172427121883998797908792274921901699720888093776''' '''65727333001053367881220235421809751254540594752243''' '''52584907711670556013604839586446706324415722155397''' '''53697817977846174064955149290862569321978468622482''' '''83972241375657056057490261407972968652414535100474''' '''82166370484403199890008895243450658541227588666881''' '''16427171479924442928230863465674813919123162824586''' '''17866458359124566529476545682848912883142607690042''' '''24219022671055626321111109370544217506941658960408''' '''07198403850962455444362981230987879927244284909188''' '''84580156166097919133875499200524063689912560717606''' '''05886116467109405077541002256983155200055935729725''' '''71636269561882670428252483600823257530420752963450''' ) def __UpperCAmelCase ( snake_case_ : str = N ) -> int: """simple docstring""" return max( # mypy cannot properly interpret reduce int(reduce(lambda snake_case_ , snake_case_ : str(int(snake_case_ ) * int(snake_case_ ) ) , n[i : i + 13] ) ) for i in range(len(snake_case_ ) - 12 ) ) if __name__ == "__main__": print(F'{solution() = }')
317
"""simple docstring""" from functools import reduce SCREAMING_SNAKE_CASE : int = ( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715693290963295227443043557''' '''66896648950445244523161731856403098711121722383113''' '''62229893423380308135336276614282806444486645238749''' '''30358907296290491560440772390713810515859307960866''' '''70172427121883998797908792274921901699720888093776''' '''65727333001053367881220235421809751254540594752243''' '''52584907711670556013604839586446706324415722155397''' '''53697817977846174064955149290862569321978468622482''' '''83972241375657056057490261407972968652414535100474''' '''82166370484403199890008895243450658541227588666881''' '''16427171479924442928230863465674813919123162824586''' '''17866458359124566529476545682848912883142607690042''' '''24219022671055626321111109370544217506941658960408''' '''07198403850962455444362981230987879927244284909188''' '''84580156166097919133875499200524063689912560717606''' '''05886116467109405077541002256983155200055935729725''' '''71636269561882670428252483600823257530420752963450''' ) def __UpperCAmelCase ( snake_case_ : str = N ) -> int: """simple docstring""" return max( # mypy cannot properly interpret reduce int(reduce(lambda snake_case_ , snake_case_ : str(int(snake_case_ ) * int(snake_case_ ) ) , n[i : i + 13] ) ) for i in range(len(snake_case_ ) - 12 ) ) if __name__ == "__main__": print(F'{solution() = }')
317
1
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( snake_case_ : list[list[int]] ) -> int: """simple docstring""" for i in range(1 , len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 , len(snake_case_ ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 , len(snake_case_ ) ): for j in range(1 , len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
317
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : int = 600851475143 ) -> int: """simple docstring""" try: _lowerCAmelCase = int(snake_case_ ) except (TypeError, ValueError): raise TypeError("""Parameter n must be int or castable to int.""" ) if n <= 0: raise ValueError("""Parameter n must be greater than or equal to one.""" ) _lowerCAmelCase = 1 _lowerCAmelCase = 2 while i * i <= n: while n % i == 0: _lowerCAmelCase = i n //= i i += 1 if n > 1: _lowerCAmelCase = n return int(snake_case_ ) if __name__ == "__main__": print(F'{solution() = }')
317
1
"""simple docstring""" import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def __UpperCAmelCase ( snake_case_ : Dict ) -> Union[str, Any]: # picklable for multiprocessing """simple docstring""" return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def __UpperCAmelCase ( ) -> Optional[Any]: """simple docstring""" with parallel_backend("""spark""" ): assert ParallelBackendConfig.backend_name == "spark" _lowerCAmelCase = [1, 2, 3] with pytest.raises(snake_case_ ): with parallel_backend("""unsupported backend""" ): map_nested(snake_case_ , snake_case_ , num_proc=2 ) with pytest.raises(snake_case_ ): with parallel_backend("""unsupported backend""" ): map_nested(snake_case_ , snake_case_ , num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize("""num_proc""" , [2, -1] ) def __UpperCAmelCase ( snake_case_ : int ) -> Optional[int]: """simple docstring""" _lowerCAmelCase = [1, 2] _lowerCAmelCase = {"""a""": 1, """b""": 2} _lowerCAmelCase = {"""a""": [1, 2], """b""": [3, 4]} _lowerCAmelCase = {"""a""": {"""1""": 1}, """b""": 2} _lowerCAmelCase = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4} _lowerCAmelCase = [2, 3] _lowerCAmelCase = {"""a""": 2, """b""": 3} _lowerCAmelCase = {"""a""": [2, 3], """b""": [4, 5]} _lowerCAmelCase = {"""a""": {"""1""": 2}, """b""": 3} _lowerCAmelCase = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5} with parallel_backend("""spark""" ): assert map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) == expected_map_nested_sa assert map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) == expected_map_nested_sa assert map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) == expected_map_nested_sa assert map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) == expected_map_nested_sa assert map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) == expected_map_nested_sa
317
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__) @dataclass class __lowerCamelCase : __UpperCamelCase = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) __UpperCamelCase = field( default=__lowercase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) __UpperCamelCase = field( default=__lowercase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) __UpperCamelCase = field( default=__lowercase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) __UpperCamelCase = field(default=__lowercase , metadata={'help': 'Whether tp freeze the encoder.'} ) __UpperCamelCase = field(default=__lowercase , metadata={'help': 'Whether to freeze the embeddings.'} ) @dataclass class __lowerCamelCase : __UpperCamelCase = field( metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} ) __UpperCamelCase = field( default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , ) __UpperCamelCase = field( default=1_024 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __UpperCamelCase = field( default=128 , metadata={ 'help': ( 'The maximum total sequence length for target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __UpperCamelCase = field( default=142 , metadata={ 'help': ( 'The maximum total sequence length for validation target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded. ' 'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used ' 'during ``evaluate`` and ``predict``.' ) } , ) __UpperCamelCase = field( default=142 , metadata={ 'help': ( 'The maximum total sequence length for test target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __UpperCamelCase = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} ) __UpperCamelCase = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} ) __UpperCamelCase = field(default=-1 , metadata={'help': '# test examples. 
-1 means use all.'} ) __UpperCamelCase = field(default=__lowercase , metadata={'help': 'Source language id for translation.'} ) __UpperCamelCase = field(default=__lowercase , metadata={'help': 'Target language id for translation.'} ) __UpperCamelCase = field(default=__lowercase , metadata={'help': '# num_beams to use for evaluation.'} ) __UpperCamelCase = field( default=__lowercase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , ) def __UpperCAmelCase ( snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Union[str, Any] ) -> Tuple: """simple docstring""" logger.info(F"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(F""" {key} = {metrics[key]}""" ) save_json(snake_case_ , os.path.join(snake_case_ , F"""{split}_results.json""" ) ) def __UpperCAmelCase ( ) -> Union[str, Any]: """simple docstring""" _lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_args_into_dataclasses() check_output_dir(snake_case_ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("""Training/evaluation parameters %s""" , snake_case_ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_lowerCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _lowerCAmelCase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""") for p in extra_model_params: if getattr(snake_case_ , snake_case_ , snake_case_ ): assert hasattr(snake_case_ , snake_case_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) ) _lowerCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(snake_case_ , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: _lowerCAmelCase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(snake_case_ , snake_case_ ): _lowerCAmelCase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: _lowerCAmelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(snake_case_ ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) _lowerCAmelCase = SeqaSeqDataset # Get datasets _lowerCAmelCase = ( dataset_class( snake_case_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_train else None ) _lowerCAmelCase = ( dataset_class( snake_case_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) _lowerCAmelCase = ( dataset_class( snake_case_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_predict else None ) # Initialize our Trainer _lowerCAmelCase = ( build_compute_metrics_fn(data_args.task , snake_case_ ) if training_args.predict_with_generate else None ) _lowerCAmelCase = SeqaSeqTrainer( model=snake_case_ , args=snake_case_ , data_args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , data_collator=SeqaSeqDataCollator( snake_case_ , snake_case_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case_ , tokenizer=snake_case_ , ) _lowerCAmelCase = {} # Training if training_args.do_train: logger.info("""*** Train ***""" ) _lowerCAmelCase = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) _lowerCAmelCase = train_result.metrics _lowerCAmelCase = data_args.n_train 
trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("""train""" , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) _lowerCAmelCase = trainer.evaluate(metric_key_prefix="""val""" ) _lowerCAmelCase = data_args.n_val _lowerCAmelCase = round(metrics["""val_loss"""] , 4 ) if trainer.is_world_process_zero(): handle_metrics("""val""" , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) if training_args.do_predict: logger.info("""*** Predict ***""" ) _lowerCAmelCase = trainer.predict(test_dataset=snake_case_ , metric_key_prefix="""test""" ) _lowerCAmelCase = test_output.metrics _lowerCAmelCase = data_args.n_test if trainer.is_world_process_zero(): _lowerCAmelCase = round(metrics["""test_loss"""] , 4 ) handle_metrics("""test""" , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) if training_args.predict_with_generate: _lowerCAmelCase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ ) _lowerCAmelCase = lmap(str.strip , snake_case_ ) write_txt_file(snake_case_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) ) if trainer.is_world_process_zero(): save_json(snake_case_ , os.path.join(training_args.output_dir , """all_results.json""" ) ) return all_metrics def __UpperCAmelCase ( snake_case_ : Any ) -> Dict: """simple docstring""" main() if __name__ == "__main__": main()
317
1
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.local_sgd import LocalSGD ######################################################################## # This is a fully working simple example to use Accelerate # with LocalSGD, which is a method to synchronize model # parameters every K batches. It is different, but complementary # to gradient accumulation. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## SCREAMING_SNAKE_CASE : str = 1_6 SCREAMING_SNAKE_CASE : Dict = 3_2 def __UpperCAmelCase ( snake_case_ : Accelerator , snake_case_ : int = 16 ) -> List[str]: """simple docstring""" _lowerCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _lowerCAmelCase = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(snake_case_ : List[str] ): # max_length=None => use the model max length (it's actually the default) _lowerCAmelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case_ , max_length=snake_case_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _lowerCAmelCase = datasets.map( snake_case_ , batched=snake_case_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _lowerCAmelCase = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(snake_case_ : Union[str, Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. _lowerCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _lowerCAmelCase = 16 elif accelerator.mixed_precision != "no": _lowerCAmelCase = 8 else: _lowerCAmelCase = None return tokenizer.pad( snake_case_ , padding="""longest""" , max_length=snake_case_ , pad_to_multiple_of=snake_case_ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
_lowerCAmelCase = DataLoader( tokenized_datasets["""train"""] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ ) _lowerCAmelCase = DataLoader( tokenized_datasets["""validation"""] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders SCREAMING_SNAKE_CASE : Union[str, Any] = mocked_dataloaders # noqa: F811 def __UpperCAmelCase ( snake_case_ : Union[str, Any] , snake_case_ : Any ) -> Union[str, Any]: """simple docstring""" if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , snake_case_ ) == "1": _lowerCAmelCase = 2 # New Code # _lowerCAmelCase = int(args.gradient_accumulation_steps ) _lowerCAmelCase = int(args.local_sgd_steps ) # Initialize accelerator _lowerCAmelCase = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=snake_case_ ) if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]: raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _lowerCAmelCase = config["""lr"""] _lowerCAmelCase = int(config["""num_epochs"""] ) _lowerCAmelCase = int(config["""seed"""] ) _lowerCAmelCase = int(config["""batch_size"""] ) _lowerCAmelCase = evaluate.load("""glue""" , """mrpc""" ) set_seed(snake_case_ ) _lowerCAmelCase , _lowerCAmelCase = get_dataloaders(snake_case_ , snake_case_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=snake_case_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _lowerCAmelCase = model.to(accelerator.device ) # Instantiate optimizer _lowerCAmelCase = AdamW(params=model.parameters() , lr=snake_case_ ) # Instantiate scheduler _lowerCAmelCase = get_linear_schedule_with_warmup( optimizer=snake_case_ , num_warmup_steps=100 , num_training_steps=(len(snake_case_ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = accelerator.prepare( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Now we train the model for epoch in range(snake_case_ ): model.train() with LocalSGD( accelerator=snake_case_ , model=snake_case_ , local_sgd_steps=snake_case_ , enabled=local_sgd_steps is not None ) as local_sgd: for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(snake_case_ ): _lowerCAmelCase = model(**snake_case_ ) _lowerCAmelCase = output.loss accelerator.backward(snake_case_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # LocalSGD-specific line local_sgd.step() model.eval() for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _lowerCAmelCase = model(**snake_case_ ) _lowerCAmelCase = outputs.logits.argmax(dim=-1 ) _lowerCAmelCase , _lowerCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=snake_case_ , references=snake_case_ , ) _lowerCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , snake_case_ ) def __UpperCAmelCase ( ) -> Optional[Any]: """simple docstring""" _lowerCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=snake_case_ , default=snake_case_ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) # New Code # parser.add_argument( """--gradient_accumulation_steps""" , type=snake_case_ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , ) parser.add_argument( """--local_sgd_steps""" , type=snake_case_ , default=8 , help="""Number of local SGD steps or None to disable local SGD""" ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) _lowerCAmelCase = parser.parse_args() _lowerCAmelCase = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(snake_case_ , snake_case_ ) if __name__ == "__main__": main()
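# A minimal launch sketch (hedged: assumes `accelerate config` was run once and that
# this file is saved as local_sgd_example.py -- the file name is a placeholder):
#
#   accelerate launch local_sgd_example.py --gradient_accumulation_steps 2 --local_sgd_steps 8
#
# With those flags the optimizer steps once per 2 micro-batches, while LocalSGD only
# averages model parameters across processes every 8 local steps.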
317
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE : List[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Union[str, Any] = [ '''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FocalNetForImageClassification''', '''FocalNetForMaskedImageModeling''', '''FocalNetBackbone''', '''FocalNetModel''', '''FocalNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
317
1
"""simple docstring""" from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) # TODO Update this SCREAMING_SNAKE_CASE : Tuple = { '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''', # See all ESM models at https://huggingface.co/models?filter=esm } class __lowerCamelCase ( __lowercase ): __UpperCamelCase = 'esm' def __init__(self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3_072 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=1_026 , lowerCamelCase=0.02 , lowerCamelCase=1e-12 , lowerCamelCase="absolute" , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=None , lowerCamelCase=None , **lowerCamelCase , ): '''simple docstring''' super().__init__(pad_token_id=lowerCamelCase , mask_token_id=lowerCamelCase , **lowerCamelCase ) _lowerCAmelCase = vocab_size _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = initializer_range _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = position_embedding_type _lowerCAmelCase = use_cache _lowerCAmelCase = emb_layer_norm_before _lowerCAmelCase = token_dropout _lowerCAmelCase = is_folding_model if is_folding_model: if esmfold_config is None: logger.info("""No esmfold_config supplied for folding model, using default values.""" ) _lowerCAmelCase = EsmFoldConfig() elif isinstance(lowerCamelCase , lowerCamelCase ): _lowerCAmelCase = EsmFoldConfig(**lowerCamelCase ) _lowerCAmelCase = esmfold_config if vocab_list is None: logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" ) _lowerCAmelCase = get_default_vocab_list() else: _lowerCAmelCase = vocab_list else: _lowerCAmelCase = None _lowerCAmelCase = None if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , lowerCamelCase ): raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = super().to_dict() if isinstance(self.esmfold_config , lowerCamelCase ): _lowerCAmelCase = self.esmfold_config.to_dict() return output @dataclass class __lowerCamelCase : __UpperCamelCase = None __UpperCamelCase = True __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = 0 __UpperCamelCase = True __UpperCamelCase = False __UpperCamelCase = 128 __UpperCamelCase = None def A__ (self ): '''simple docstring''' if self.trunk is None: _lowerCAmelCase = TrunkConfig() elif isinstance(self.trunk , lowerCamelCase ): _lowerCAmelCase = TrunkConfig(**self.trunk ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = asdict(self ) _lowerCAmelCase = self.trunk.to_dict() return output @dataclass class __lowerCamelCase : __UpperCamelCase = 48 __UpperCamelCase = 1_024 __UpperCamelCase = 128 __UpperCamelCase = 32 __UpperCamelCase = 32 __UpperCamelCase = 32 __UpperCamelCase = 0 __UpperCamelCase = 0 __UpperCamelCase = False __UpperCamelCase = 4 __UpperCamelCase = 128 __UpperCamelCase = None def A__ (self ): '''simple docstring''' if 
self.structure_module is None: _lowerCAmelCase = StructureModuleConfig() elif isinstance(self.structure_module , lowerCamelCase ): _lowerCAmelCase = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" ) if self.sequence_state_dim % self.sequence_head_width != 0: raise ValueError( """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got""" f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" ) if self.pairwise_state_dim % self.pairwise_head_width != 0: raise ValueError( """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got""" f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" ) _lowerCAmelCase = self.sequence_state_dim // self.sequence_head_width _lowerCAmelCase = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( """`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got""" f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( """`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got""" f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" ) if self.pairwise_state_dim % 2 != 0: raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" ) if self.dropout >= 0.4: raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = asdict(self ) _lowerCAmelCase = self.structure_module.to_dict() return output @dataclass class __lowerCamelCase : __UpperCamelCase = 384 __UpperCamelCase = 128 __UpperCamelCase = 16 __UpperCamelCase = 128 __UpperCamelCase = 12 __UpperCamelCase = 4 __UpperCamelCase = 8 __UpperCamelCase = 0.1 __UpperCamelCase = 8 __UpperCamelCase = 1 __UpperCamelCase = 2 __UpperCamelCase = 7 __UpperCamelCase = 10 __UpperCamelCase = 1E-8 __UpperCamelCase = 1E5 def A__ (self ): '''simple docstring''' return asdict(self ) def __UpperCAmelCase ( ) -> str: """simple docstring""" return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
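# A hedged usage sketch (assumes the upstream `transformers` classes are named
# EsmConfig / EsmFoldConfig, matching this configuration module):
from transformers import EsmConfig

config = EsmConfig(vocab_size=33, hidden_size=320, num_hidden_layers=6, num_attention_heads=20)
print(config.position_embedding_type)  # "absolute" unless overridden, as defaulted above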
317
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __lowerCamelCase ( unittest.TestCase ): def __init__(self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=18 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=None , ): '''simple docstring''' _lowerCAmelCase = size if size is not None else {"""shortest_edge""": 20} _lowerCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = num_channels _lowerCAmelCase = image_size _lowerCAmelCase = min_resolution _lowerCAmelCase = max_resolution _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = do_center_crop _lowerCAmelCase = crop_size def A__ (self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class __lowerCamelCase ( __lowercase , unittest.TestCase ): __UpperCamelCase = MobileNetVaImageProcessor if is_vision_available() else None def A__ (self ): '''simple docstring''' _lowerCAmelCase = MobileNetVaImageProcessingTester(self ) @property def A__ (self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase , """do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase , """size""" ) ) self.assertTrue(hasattr(lowerCamelCase , """do_center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase , """crop_size""" ) ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 20} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def A__ (self ): '''simple docstring''' pass def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , Image.Image ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, 
self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , np.ndarray ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , torch.Tensor ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
317
1
"""simple docstring""" from __future__ import annotations import string from itertools import cycle, product from pathlib import Path SCREAMING_SNAKE_CASE : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) SCREAMING_SNAKE_CASE : list[int] = [ord(letter) for letter in string.ascii_lowercase] SCREAMING_SNAKE_CASE : set[int] = {ord(char) for char in VALID_CHARS} SCREAMING_SNAKE_CASE : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def __UpperCAmelCase ( snake_case_ : list[int] , snake_case_ : tuple[int, ...] ) -> str | None: """simple docstring""" _lowerCAmelCase = "" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 for keychar, cipherchar in zip(cycle(snake_case_ ) , snake_case_ ): _lowerCAmelCase = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(snake_case_ ) return decoded def __UpperCAmelCase ( snake_case_ : list[int] ) -> list[str]: """simple docstring""" _lowerCAmelCase = [] for key in product(snake_case_ , repeat=3 ): _lowerCAmelCase = try_key(snake_case_ , snake_case_ ) if encoded is not None: possibles.append(snake_case_ ) return possibles def __UpperCAmelCase ( snake_case_ : list[str] , snake_case_ : str ) -> list[str]: """simple docstring""" return [possible for possible in possibles if common_word in possible.lower()] def __UpperCAmelCase ( snake_case_ : str = "p059_cipher.txt" ) -> int: """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = Path(snake_case_ ).parent.joinpath(snake_case_ ).read_text(encoding="""utf-8""" ) _lowerCAmelCase = [int(snake_case_ ) for number in data.strip().split(""",""" )] _lowerCAmelCase = filter_valid_chars(snake_case_ ) for common_word in COMMON_WORDS: _lowerCAmelCase = filter_common_word(snake_case_ , snake_case_ ) if len(snake_case_ ) == 1: break _lowerCAmelCase = possibles[0] return sum(ord(snake_case_ ) for char in decoded_text ) if __name__ == "__main__": print(F'{solution() = }')
317
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : list ) -> list: """simple docstring""" for i in range(len(snake_case_ ) - 1 , 0 , -1 ): _lowerCAmelCase = False for j in range(snake_case_ , 0 , -1 ): if unsorted[j] < unsorted[j - 1]: _lowerCAmelCase , _lowerCAmelCase = unsorted[j - 1], unsorted[j] _lowerCAmelCase = True for j in range(snake_case_ ): if unsorted[j] > unsorted[j + 1]: _lowerCAmelCase , _lowerCAmelCase = unsorted[j + 1], unsorted[j] _lowerCAmelCase = True if not swapped: break return unsorted if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE : List[Any] = input('''Enter numbers separated by a comma:\n''').strip() SCREAMING_SNAKE_CASE : List[str] = [int(item) for item in user_input.split(''',''')] print(F'{cocktail_shaker_sort(unsorted) = }')
317
1
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : str , snake_case_ : str ) -> list: """simple docstring""" _lowerCAmelCase = len(snake_case_ ) _lowerCAmelCase = [] for i in range(len(snake_case_ ) - pat_len + 1 ): _lowerCAmelCase = True for j in range(snake_case_ ): if s[i + j] != pattern[j]: _lowerCAmelCase = False break if match_found: position.append(snake_case_ ) return position if __name__ == "__main__": assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3] print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
317
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) def __UpperCAmelCase ( snake_case_ : bool , snake_case_ : bool ) -> Tuple: """simple docstring""" def run_func(snake_case_ : Union[str, Any] ): @wraps(snake_case_ ) def run_in_eager_mode(*snake_case_ : Optional[int] , **snake_case_ : Union[str, Any] ): return func(*snake_case_ , **snake_case_ ) @wraps(snake_case_ ) @tf.function(experimental_compile=snake_case_ ) def run_in_graph_mode(*snake_case_ : Dict , **snake_case_ : Union[str, Any] ): return func(*snake_case_ , **snake_case_ ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> ["tf.Tensor"]: """simple docstring""" _lowerCAmelCase = random.Random() _lowerCAmelCase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(snake_case_ , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class __lowerCamelCase ( __lowercase ): __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = "TensorFlow" @property def A__ (self ): '''simple docstring''' return tf.__version__ def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_speed(_inference ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_speed(_train ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase ) _lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_memory(_inference ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase ) _lowerCAmelCase = self.args.strategy if strategy is None: raise 
ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_memory(_train ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _lowerCAmelCase = ( hasattr(lowerCamelCase , """architectures""" ) and isinstance(config.architectures , lowerCamelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = model_cls(lowerCamelCase ) except ImportError: raise ImportError( f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to""" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _lowerCAmelCase = TF_MODEL_MAPPING[config.__class__](lowerCamelCase ) # encoder-decoder has vocab size saved differently _lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size _lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(lowerCamelCase , decoder_input_ids=lowerCamelCase , training=lowerCamelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(lowerCamelCase , training=lowerCamelCase ) _lowerCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _lowerCAmelCase = ( hasattr(lowerCamelCase , """architectures""" ) and isinstance(config.architectures , lowerCamelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = model_cls(lowerCamelCase ) except ImportError: raise ImportError( f"""{model_class} does not exist. 
If you just want to test the pretrained model, you might want to""" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _lowerCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowerCamelCase ) # encoder-decoder has vocab size saved differently _lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size _lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): _lowerCAmelCase = model(lowerCamelCase , decoder_input_ids=lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0] _lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): _lowerCAmelCase = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0] _lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables ) return gradients _lowerCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def A__ (self , lowerCamelCase ): '''simple docstring''' with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(lowerCamelCase , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _lowerCAmelCase = timeit.repeat( lowerCamelCase , repeat=self.args.repeat , number=10 , ) return min(lowerCamelCase ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f"""Doesn't fit on GPU. {e}""" ) def A__ (self , lowerCamelCase ): '''simple docstring''' logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _lowerCAmelCase = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _lowerCAmelCase = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _lowerCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _lowerCAmelCase = nvml.nvmlDeviceGetMemoryInfo(lowerCamelCase ) _lowerCAmelCase = meminfo.used _lowerCAmelCase = Memory(lowerCamelCase ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _lowerCAmelCase = None else: _lowerCAmelCase = measure_peak_memory_cpu(lowerCamelCase ) _lowerCAmelCase = Memory(lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else memory_bytes if self.args.trace_memory_line_by_line: _lowerCAmelCase = stop_memory_tracing(lowerCamelCase ) if memory is None: _lowerCAmelCase = summary.total else: _lowerCAmelCase = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f"""Doesn't fit on GPU. {e}""" ) return "N/A", None
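# A hedged usage sketch (assumes the public benchmark API exported by `transformers`,
# which this class implements):
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
)
benchmark = TensorFlowBenchmark(args)
results = benchmark.run()  # runs the speed and memory measurements configured in `args`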
317
1
"""simple docstring""" import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device SCREAMING_SNAKE_CASE : List[str] = False class __lowerCamelCase ( unittest.TestCase ): pass @slow @require_torch_gpu class __lowerCamelCase ( unittest.TestCase ): def A__ (self ): '''simple docstring''' _lowerCAmelCase = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) _lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe( image=lowerCamelCase , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images _lowerCAmelCase = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _lowerCAmelCase = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
317
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Any = { '''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''', } class __lowerCamelCase ( __lowercase ): __UpperCamelCase = 'transfo-xl' __UpperCamelCase = ['mems'] __UpperCamelCase = { 'n_token': 'vocab_size', 'hidden_size': 'd_model', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__(self , lowerCamelCase=267_735 , lowerCamelCase=[20_000, 40_000, 200_000] , lowerCamelCase=1_024 , lowerCamelCase=1_024 , lowerCamelCase=16 , lowerCamelCase=64 , lowerCamelCase=4_096 , lowerCamelCase=4 , lowerCamelCase=False , lowerCamelCase=18 , lowerCamelCase=1_600 , lowerCamelCase=1_000 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=0 , lowerCamelCase=-1 , lowerCamelCase=True , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase="normal" , lowerCamelCase=0.01 , lowerCamelCase=0.01 , lowerCamelCase=0.02 , lowerCamelCase=1e-5 , lowerCamelCase=0 , **lowerCamelCase , ): '''simple docstring''' _lowerCAmelCase = vocab_size _lowerCAmelCase = [] self.cutoffs.extend(lowerCamelCase ) if proj_share_all_but_first: _lowerCAmelCase = [False] + [True] * len(self.cutoffs ) else: _lowerCAmelCase = [False] + [False] * len(self.cutoffs ) _lowerCAmelCase = d_model _lowerCAmelCase = d_embed _lowerCAmelCase = d_head _lowerCAmelCase = d_inner _lowerCAmelCase = div_val _lowerCAmelCase = pre_lnorm _lowerCAmelCase = n_layer _lowerCAmelCase = n_head _lowerCAmelCase = mem_len _lowerCAmelCase = same_length _lowerCAmelCase = attn_type _lowerCAmelCase = clamp_len _lowerCAmelCase = sample_softmax _lowerCAmelCase = adaptive _lowerCAmelCase = dropout _lowerCAmelCase = dropatt _lowerCAmelCase = untie_r _lowerCAmelCase = init _lowerCAmelCase = init_range _lowerCAmelCase = proj_init_std _lowerCAmelCase = init_std _lowerCAmelCase = layer_norm_epsilon super().__init__(eos_token_id=lowerCamelCase , **lowerCamelCase ) @property def A__ (self ): '''simple docstring''' logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) return -1 @max_position_embeddings.setter def A__ (self , lowerCamelCase ): '''simple docstring''' raise NotImplementedError( f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
317
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Any = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''} class __lowerCamelCase ( __lowercase ): __UpperCamelCase = 'ctrl' __UpperCamelCase = ['past_key_values'] __UpperCamelCase = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__(self , lowerCamelCase=246_534 , lowerCamelCase=256 , lowerCamelCase=1_280 , lowerCamelCase=8_192 , lowerCamelCase=48 , lowerCamelCase=16 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=1e-6 , lowerCamelCase=0.02 , lowerCamelCase=True , **lowerCamelCase , ): '''simple docstring''' _lowerCAmelCase = vocab_size _lowerCAmelCase = n_positions _lowerCAmelCase = n_embd _lowerCAmelCase = n_layer _lowerCAmelCase = n_head _lowerCAmelCase = dff _lowerCAmelCase = resid_pdrop _lowerCAmelCase = embd_pdrop _lowerCAmelCase = layer_norm_epsilon _lowerCAmelCase = initializer_range _lowerCAmelCase = use_cache super().__init__(**lowerCamelCase )
317
"""simple docstring""" import math def __UpperCAmelCase ( snake_case_ : int ) -> list[int]: """simple docstring""" _lowerCAmelCase = [] _lowerCAmelCase = 2 _lowerCAmelCase = int(math.sqrt(snake_case_ ) ) # Size of every segment _lowerCAmelCase = [True] * (end + 1) _lowerCAmelCase = [] while start <= end: if temp[start] is True: in_prime.append(snake_case_ ) for i in range(start * start , end + 1 , snake_case_ ): _lowerCAmelCase = False start += 1 prime += in_prime _lowerCAmelCase = end + 1 _lowerCAmelCase = min(2 * end , snake_case_ ) while low <= n: _lowerCAmelCase = [True] * (high - low + 1) for each in in_prime: _lowerCAmelCase = math.floor(low / each ) * each if t < low: t += each for j in range(snake_case_ , high + 1 , snake_case_ ): _lowerCAmelCase = False for j in range(len(snake_case_ ) ): if temp[j] is True: prime.append(j + low ) _lowerCAmelCase = high + 1 _lowerCAmelCase = min(high + end , snake_case_ ) return prime print(sieve(1_0**6))
317
1
"""simple docstring""" import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) SCREAMING_SNAKE_CASE : int = pytest.mark.integration @pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] ) def __UpperCAmelCase ( snake_case_ : str , snake_case_ : Optional[Any] ) -> Optional[int]: """simple docstring""" inspect_dataset(snake_case_ , snake_case_ ) _lowerCAmelCase = path + """.py""" assert script_name in os.listdir(snake_case_ ) assert "__pycache__" not in os.listdir(snake_case_ ) @pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" ) @pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" ) @pytest.mark.parametrize("""path""" , ["""accuracy"""] ) def __UpperCAmelCase ( snake_case_ : List[str] , snake_case_ : int ) -> str: """simple docstring""" inspect_metric(snake_case_ , snake_case_ ) _lowerCAmelCase = path + """.py""" assert script_name in os.listdir(snake_case_ ) assert "__pycache__" not in os.listdir(snake_case_ ) @pytest.mark.parametrize( """path, config_name, expected_splits""" , [ ("""squad""", """plain_text""", ["""train""", """validation"""]), ("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]), ("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]), ] , ) def __UpperCAmelCase ( snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Dict ) -> Any: """simple docstring""" _lowerCAmelCase = get_dataset_config_info(snake_case_ , config_name=snake_case_ ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( """path, config_name, expected_exception""" , [ ("""paws""", None, ValueError), ] , ) def __UpperCAmelCase ( snake_case_ : str , snake_case_ : int , snake_case_ : int ) -> Union[str, Any]: """simple docstring""" with pytest.raises(snake_case_ ): get_dataset_config_info(snake_case_ , config_name=snake_case_ ) @pytest.mark.parametrize( """path, expected""" , [ ("""squad""", """plain_text"""), ("""acronym_identification""", """default"""), ("""lhoestq/squad""", """plain_text"""), ("""lhoestq/test""", """default"""), ("""lhoestq/demo1""", """lhoestq--demo1"""), ("""dalle-mini/wit""", """dalle-mini--wit"""), ] , ) def __UpperCAmelCase ( snake_case_ : Tuple , snake_case_ : Tuple ) -> int: """simple docstring""" _lowerCAmelCase = get_dataset_config_names(snake_case_ ) assert expected in config_names @pytest.mark.parametrize( """path, expected_configs, expected_splits_in_first_config""" , [ ("""squad""", ["""plain_text"""], ["""train""", """validation"""]), ("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]), ("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]), ] , ) def __UpperCAmelCase ( snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : int ) -> Any: """simple docstring""" _lowerCAmelCase = get_dataset_infos(snake_case_ ) assert list(infos.keys() ) == expected_configs _lowerCAmelCase = expected_configs[0] assert expected_config in infos _lowerCAmelCase = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( """path, expected_config, expected_splits""" , [ ("""squad""", """plain_text""", ["""train""", """validation"""]), ("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]), ("""paws""", 
"""labeled_final""", ["""train""", """test""", """validation"""]), ] , ) def __UpperCAmelCase ( snake_case_ : Any , snake_case_ : str , snake_case_ : Tuple ) -> Dict: """simple docstring""" _lowerCAmelCase = get_dataset_infos(snake_case_ ) assert expected_config in infos _lowerCAmelCase = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( """path, config_name, expected_exception""" , [ ("""paws""", None, ValueError), ] , ) def __UpperCAmelCase ( snake_case_ : List[str] , snake_case_ : Any , snake_case_ : int ) -> List[str]: """simple docstring""" with pytest.raises(snake_case_ ): get_dataset_split_names(snake_case_ , config_name=snake_case_ )
317
"""simple docstring""" import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters SCREAMING_SNAKE_CASE : Any = (7_2_0, 1_2_8_0) # Height, Width SCREAMING_SNAKE_CASE : List[str] = (0.4, 0.6) # if height or width lower than this scale, drop it. SCREAMING_SNAKE_CASE : List[Any] = 1 / 1_0_0 SCREAMING_SNAKE_CASE : Optional[Any] = '''''' SCREAMING_SNAKE_CASE : Dict = '''''' SCREAMING_SNAKE_CASE : List[Any] = '''''' SCREAMING_SNAKE_CASE : Dict = 2_5_0 def __UpperCAmelCase ( ) -> None: """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = get_dataset(snake_case_ , snake_case_ ) for index in range(snake_case_ ): _lowerCAmelCase = random.sample(range(len(snake_case_ ) ) , 4 ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = update_image_and_anno( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , filter_scale=snake_case_ , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' _lowerCAmelCase = random_chars(32 ) _lowerCAmelCase = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0] _lowerCAmelCase = F"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}""" cva.imwrite(F"""{file_root}.jpg""" , snake_case_ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(F"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" ) _lowerCAmelCase = [] for anno in new_annos: _lowerCAmelCase = anno[3] - anno[1] _lowerCAmelCase = anno[4] - anno[2] _lowerCAmelCase = anno[1] + width / 2 _lowerCAmelCase = anno[2] + height / 2 _lowerCAmelCase = F"""{anno[0]} {x_center} {y_center} {width} {height}""" annos_list.append(snake_case_ ) with open(F"""{file_root}.txt""" , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def __UpperCAmelCase ( snake_case_ : str , snake_case_ : str ) -> tuple[list, list]: """simple docstring""" _lowerCAmelCase = [] _lowerCAmelCase = [] for label_file in glob.glob(os.path.join(snake_case_ , """*.txt""" ) ): _lowerCAmelCase = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(snake_case_ ) as in_file: _lowerCAmelCase = in_file.readlines() _lowerCAmelCase = os.path.join(snake_case_ , F"""{label_name}.jpg""" ) _lowerCAmelCase = [] for obj_list in obj_lists: _lowerCAmelCase = obj_list.rstrip("""\n""" ).split(""" """ ) _lowerCAmelCase = float(obj[1] ) - float(obj[3] ) / 2 _lowerCAmelCase = float(obj[2] ) - float(obj[4] ) / 2 _lowerCAmelCase = float(obj[1] ) + float(obj[3] ) / 2 _lowerCAmelCase = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(snake_case_ ) labels.append(snake_case_ ) return img_paths, labels def __UpperCAmelCase ( snake_case_ : list , snake_case_ : list , snake_case_ : list[int] , snake_case_ : tuple[int, int] , snake_case_ : tuple[float, float] , snake_case_ : float = 0.0 , ) -> tuple[list, list, str]: """simple docstring""" _lowerCAmelCase = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) _lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) _lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) _lowerCAmelCase = int(scale_x * output_size[1] ) _lowerCAmelCase = int(scale_y * output_size[0] ) _lowerCAmelCase = [] _lowerCAmelCase = [] for i, index in enumerate(snake_case_ ): _lowerCAmelCase = all_img_list[index] path_list.append(snake_case_ ) _lowerCAmelCase = all_annos[index] _lowerCAmelCase = cva.imread(snake_case_ ) if i == 0: # top-left _lowerCAmelCase = cva.resize(snake_case_ , (divid_point_x, 
divid_point_y) ) _lowerCAmelCase = img for bbox in img_annos: _lowerCAmelCase = bbox[1] * scale_x _lowerCAmelCase = bbox[2] * scale_y _lowerCAmelCase = bbox[3] * scale_x _lowerCAmelCase = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right _lowerCAmelCase = cva.resize(snake_case_ , (output_size[1] - divid_point_x, divid_point_y) ) _lowerCAmelCase = img for bbox in img_annos: _lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x) _lowerCAmelCase = bbox[2] * scale_y _lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x) _lowerCAmelCase = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left _lowerCAmelCase = cva.resize(snake_case_ , (divid_point_x, output_size[0] - divid_point_y) ) _lowerCAmelCase = img for bbox in img_annos: _lowerCAmelCase = bbox[1] * scale_x _lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y) _lowerCAmelCase = bbox[3] * scale_x _lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right _lowerCAmelCase = cva.resize( snake_case_ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) _lowerCAmelCase = img for bbox in img_annos: _lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x) _lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y) _lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x) _lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: _lowerCAmelCase = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def __UpperCAmelCase ( snake_case_ : int ) -> str: """simple docstring""" assert number_char > 1, "The number of character should greater than 1" _lowerCAmelCase = ascii_lowercase + digits return "".join(random.choice(snake_case_ ) for _ in range(snake_case_ ) ) if __name__ == "__main__": main() print('''DONE ✅''')
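# Quadrant remap intuition (illustrative): boxes are stored in relative [0, 1]
# coordinates, so pasting an image into the top-right quadrant maps
# x -> scale_x + x * (1 - scale_x) while y stays y * scale_y; with scale_x = 0.5,
# a box edge at x = 0.2 lands at 0.5 + 0.2 * 0.5 = 0.6 of the mosaic width.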
317
1
"""simple docstring""" import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __lowerCamelCase ( __lowercase , __lowercase ): @register_to_config def __init__(self , *, lowerCamelCase = 4 , lowerCamelCase = 768 , lowerCamelCase , lowerCamelCase , ): '''simple docstring''' super().__init__() _lowerCAmelCase = nn.Parameter(torch.zeros(lowerCamelCase ) ) # parameters for additional clip time embeddings _lowerCAmelCase = nn.Linear(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = nn.Linear(lowerCamelCase , lowerCamelCase ) # parameters for encoder hidden states _lowerCAmelCase = clip_extra_context_tokens _lowerCAmelCase = nn.Linear( lowerCamelCase , self.clip_extra_context_tokens * cross_attention_dim ) _lowerCAmelCase = nn.Linear(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = nn.LayerNorm(lowerCamelCase ) def A__ (self , *, lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings _lowerCAmelCase = image_embeddings.shape[0] _lowerCAmelCase = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) _lowerCAmelCase = classifier_free_guidance_embeddings.expand( lowerCamelCase , -1 ) _lowerCAmelCase = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] _lowerCAmelCase = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... _lowerCAmelCase = self.embedding_proj(lowerCamelCase ) _lowerCAmelCase = self.clip_image_embeddings_project_to_time_embeddings(lowerCamelCase ) _lowerCAmelCase = time_projected_image_embeddings + time_projected_prompt_embeds # ... and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" _lowerCAmelCase = self.clip_extra_context_tokens_proj(lowerCamelCase ) _lowerCAmelCase = clip_extra_context_tokens.reshape(lowerCamelCase , -1 , self.clip_extra_context_tokens ) _lowerCAmelCase = clip_extra_context_tokens.permute(0 , 2 , 1 ) _lowerCAmelCase = self.encoder_hidden_states_proj(lowerCamelCase ) _lowerCAmelCase = self.text_encoder_hidden_states_norm(lowerCamelCase ) _lowerCAmelCase = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
317
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. SCREAMING_SNAKE_CASE : Dict = abspath(join(dirname(dirname(__file__)), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def __UpperCAmelCase ( snake_case_ : Optional[int] ) -> List[str]: """simple docstring""" from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(snake_case_ ) def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> int: """simple docstring""" from diffusers.utils.testing_utils import pytest_terminal_summary_main _lowerCAmelCase = terminalreporter.config.getoption("""--make-reports""" ) if make_reports: pytest_terminal_summary_main(snake_case_ , id=snake_case_ )
317
1
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class __lowerCamelCase ( unittest.TestCase ): def A__ (self ): '''simple docstring''' _lowerCAmelCase = tempfile.mkdtemp() # fmt: off _lowerCAmelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""] # fmt: on _lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) _lowerCAmelCase = { """do_resize""": True, """size""": {"""height""": 18, """width""": 18}, """do_normalize""": True, """image_mean""": [0.5, 0.5, 0.5], """image_std""": [0.5, 0.5, 0.5], } _lowerCAmelCase = os.path.join(self.tmpdirname , lowerCamelCase ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(lowerCamelCase , lowerCamelCase ) def A__ (self , **lowerCamelCase ): '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase ) def A__ (self , **lowerCamelCase ): '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase ) def A__ (self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _lowerCAmelCase = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = self.get_image_processor() _lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase ) processor.save_pretrained(self.tmpdirname ) _lowerCAmelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) _lowerCAmelCase = self.get_image_processor(do_normalize=lowerCamelCase , padding_value=1.0 ) _lowerCAmelCase = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) 
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.get_image_processor() _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase ) _lowerCAmelCase = self.prepare_image_inputs() _lowerCAmelCase = image_processor(lowerCamelCase , return_tensors="""np""" ) _lowerCAmelCase = processor(images=lowerCamelCase , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.get_image_processor() _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase ) _lowerCAmelCase = """lower newer""" _lowerCAmelCase = processor(text=lowerCamelCase ) _lowerCAmelCase = tokenizer(lowerCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.get_image_processor() _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase ) _lowerCAmelCase = """lower newer""" _lowerCAmelCase = self.prepare_image_inputs() _lowerCAmelCase = processor(text=lowerCamelCase , images=lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with self.assertRaises(lowerCamelCase ): processor() def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.get_image_processor() _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase ) _lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _lowerCAmelCase = processor.batch_decode(lowerCamelCase ) _lowerCAmelCase = tokenizer.batch_decode(lowerCamelCase ) self.assertListEqual(lowerCamelCase , lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.get_image_processor() _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase ) _lowerCAmelCase = """lower newer""" _lowerCAmelCase = self.prepare_image_inputs() _lowerCAmelCase = processor(text=lowerCamelCase , images=lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
317
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool SCREAMING_SNAKE_CASE : Optional[Any] = { '''Acehnese Arabic''': '''ace_Arab''', '''Acehnese Latin''': '''ace_Latn''', '''Mesopotamian Arabic''': '''acm_Arab''', '''Ta\'izzi-Adeni Arabic''': '''acq_Arab''', '''Tunisian Arabic''': '''aeb_Arab''', '''Afrikaans''': '''afr_Latn''', '''South Levantine Arabic''': '''ajp_Arab''', '''Akan''': '''aka_Latn''', '''Amharic''': '''amh_Ethi''', '''North Levantine Arabic''': '''apc_Arab''', '''Modern Standard Arabic''': '''arb_Arab''', '''Modern Standard Arabic Romanized''': '''arb_Latn''', '''Najdi Arabic''': '''ars_Arab''', '''Moroccan Arabic''': '''ary_Arab''', '''Egyptian Arabic''': '''arz_Arab''', '''Assamese''': '''asm_Beng''', '''Asturian''': '''ast_Latn''', '''Awadhi''': '''awa_Deva''', '''Central Aymara''': '''ayr_Latn''', '''South Azerbaijani''': '''azb_Arab''', '''North Azerbaijani''': '''azj_Latn''', '''Bashkir''': '''bak_Cyrl''', '''Bambara''': '''bam_Latn''', '''Balinese''': '''ban_Latn''', '''Belarusian''': '''bel_Cyrl''', '''Bemba''': '''bem_Latn''', '''Bengali''': '''ben_Beng''', '''Bhojpuri''': '''bho_Deva''', '''Banjar Arabic''': '''bjn_Arab''', '''Banjar Latin''': '''bjn_Latn''', '''Standard Tibetan''': '''bod_Tibt''', '''Bosnian''': '''bos_Latn''', '''Buginese''': '''bug_Latn''', '''Bulgarian''': '''bul_Cyrl''', '''Catalan''': '''cat_Latn''', '''Cebuano''': '''ceb_Latn''', '''Czech''': '''ces_Latn''', '''Chokwe''': '''cjk_Latn''', '''Central Kurdish''': '''ckb_Arab''', '''Crimean Tatar''': '''crh_Latn''', '''Welsh''': '''cym_Latn''', '''Danish''': '''dan_Latn''', '''German''': '''deu_Latn''', '''Southwestern Dinka''': '''dik_Latn''', '''Dyula''': '''dyu_Latn''', '''Dzongkha''': '''dzo_Tibt''', '''Greek''': '''ell_Grek''', '''English''': '''eng_Latn''', '''Esperanto''': '''epo_Latn''', '''Estonian''': '''est_Latn''', '''Basque''': '''eus_Latn''', '''Ewe''': '''ewe_Latn''', '''Faroese''': '''fao_Latn''', '''Fijian''': '''fij_Latn''', '''Finnish''': '''fin_Latn''', '''Fon''': '''fon_Latn''', '''French''': '''fra_Latn''', '''Friulian''': '''fur_Latn''', '''Nigerian Fulfulde''': '''fuv_Latn''', '''Scottish Gaelic''': '''gla_Latn''', '''Irish''': '''gle_Latn''', '''Galician''': '''glg_Latn''', '''Guarani''': '''grn_Latn''', '''Gujarati''': '''guj_Gujr''', '''Haitian Creole''': '''hat_Latn''', '''Hausa''': '''hau_Latn''', '''Hebrew''': '''heb_Hebr''', '''Hindi''': '''hin_Deva''', '''Chhattisgarhi''': '''hne_Deva''', '''Croatian''': '''hrv_Latn''', '''Hungarian''': '''hun_Latn''', '''Armenian''': '''hye_Armn''', '''Igbo''': '''ibo_Latn''', '''Ilocano''': '''ilo_Latn''', '''Indonesian''': '''ind_Latn''', '''Icelandic''': '''isl_Latn''', '''Italian''': '''ita_Latn''', '''Javanese''': '''jav_Latn''', '''Japanese''': '''jpn_Jpan''', '''Kabyle''': '''kab_Latn''', '''Jingpho''': '''kac_Latn''', '''Kamba''': '''kam_Latn''', '''Kannada''': 
'''kan_Knda''', '''Kashmiri Arabic''': '''kas_Arab''', '''Kashmiri Devanagari''': '''kas_Deva''', '''Georgian''': '''kat_Geor''', '''Central Kanuri Arabic''': '''knc_Arab''', '''Central Kanuri Latin''': '''knc_Latn''', '''Kazakh''': '''kaz_Cyrl''', '''Kabiyè''': '''kbp_Latn''', '''Kabuverdianu''': '''kea_Latn''', '''Khmer''': '''khm_Khmr''', '''Kikuyu''': '''kik_Latn''', '''Kinyarwanda''': '''kin_Latn''', '''Kyrgyz''': '''kir_Cyrl''', '''Kimbundu''': '''kmb_Latn''', '''Northern Kurdish''': '''kmr_Latn''', '''Kikongo''': '''kon_Latn''', '''Korean''': '''kor_Hang''', '''Lao''': '''lao_Laoo''', '''Ligurian''': '''lij_Latn''', '''Limburgish''': '''lim_Latn''', '''Lingala''': '''lin_Latn''', '''Lithuanian''': '''lit_Latn''', '''Lombard''': '''lmo_Latn''', '''Latgalian''': '''ltg_Latn''', '''Luxembourgish''': '''ltz_Latn''', '''Luba-Kasai''': '''lua_Latn''', '''Ganda''': '''lug_Latn''', '''Luo''': '''luo_Latn''', '''Mizo''': '''lus_Latn''', '''Standard Latvian''': '''lvs_Latn''', '''Magahi''': '''mag_Deva''', '''Maithili''': '''mai_Deva''', '''Malayalam''': '''mal_Mlym''', '''Marathi''': '''mar_Deva''', '''Minangkabau Arabic ''': '''min_Arab''', '''Minangkabau Latin''': '''min_Latn''', '''Macedonian''': '''mkd_Cyrl''', '''Plateau Malagasy''': '''plt_Latn''', '''Maltese''': '''mlt_Latn''', '''Meitei Bengali''': '''mni_Beng''', '''Halh Mongolian''': '''khk_Cyrl''', '''Mossi''': '''mos_Latn''', '''Maori''': '''mri_Latn''', '''Burmese''': '''mya_Mymr''', '''Dutch''': '''nld_Latn''', '''Norwegian Nynorsk''': '''nno_Latn''', '''Norwegian Bokmål''': '''nob_Latn''', '''Nepali''': '''npi_Deva''', '''Northern Sotho''': '''nso_Latn''', '''Nuer''': '''nus_Latn''', '''Nyanja''': '''nya_Latn''', '''Occitan''': '''oci_Latn''', '''West Central Oromo''': '''gaz_Latn''', '''Odia''': '''ory_Orya''', '''Pangasinan''': '''pag_Latn''', '''Eastern Panjabi''': '''pan_Guru''', '''Papiamento''': '''pap_Latn''', '''Western Persian''': '''pes_Arab''', '''Polish''': '''pol_Latn''', '''Portuguese''': '''por_Latn''', '''Dari''': '''prs_Arab''', '''Southern Pashto''': '''pbt_Arab''', '''Ayacucho Quechua''': '''quy_Latn''', '''Romanian''': '''ron_Latn''', '''Rundi''': '''run_Latn''', '''Russian''': '''rus_Cyrl''', '''Sango''': '''sag_Latn''', '''Sanskrit''': '''san_Deva''', '''Santali''': '''sat_Olck''', '''Sicilian''': '''scn_Latn''', '''Shan''': '''shn_Mymr''', '''Sinhala''': '''sin_Sinh''', '''Slovak''': '''slk_Latn''', '''Slovenian''': '''slv_Latn''', '''Samoan''': '''smo_Latn''', '''Shona''': '''sna_Latn''', '''Sindhi''': '''snd_Arab''', '''Somali''': '''som_Latn''', '''Southern Sotho''': '''sot_Latn''', '''Spanish''': '''spa_Latn''', '''Tosk Albanian''': '''als_Latn''', '''Sardinian''': '''srd_Latn''', '''Serbian''': '''srp_Cyrl''', '''Swati''': '''ssw_Latn''', '''Sundanese''': '''sun_Latn''', '''Swedish''': '''swe_Latn''', '''Swahili''': '''swh_Latn''', '''Silesian''': '''szl_Latn''', '''Tamil''': '''tam_Taml''', '''Tatar''': '''tat_Cyrl''', '''Telugu''': '''tel_Telu''', '''Tajik''': '''tgk_Cyrl''', '''Tagalog''': '''tgl_Latn''', '''Thai''': '''tha_Thai''', '''Tigrinya''': '''tir_Ethi''', '''Tamasheq Latin''': '''taq_Latn''', '''Tamasheq Tifinagh''': '''taq_Tfng''', '''Tok Pisin''': '''tpi_Latn''', '''Tswana''': '''tsn_Latn''', '''Tsonga''': '''tso_Latn''', '''Turkmen''': '''tuk_Latn''', '''Tumbuka''': '''tum_Latn''', '''Turkish''': '''tur_Latn''', '''Twi''': '''twi_Latn''', '''Central Atlas Tamazight''': '''tzm_Tfng''', '''Uyghur''': '''uig_Arab''', '''Ukrainian''': '''ukr_Cyrl''', '''Umbundu''': '''umb_Latn''', 
'''Urdu''': '''urd_Arab''', '''Northern Uzbek''': '''uzn_Latn''', '''Venetian''': '''vec_Latn''', '''Vietnamese''': '''vie_Latn''', '''Waray''': '''war_Latn''', '''Wolof''': '''wol_Latn''', '''Xhosa''': '''xho_Latn''', '''Eastern Yiddish''': '''ydd_Hebr''', '''Yoruba''': '''yor_Latn''', '''Yue Chinese''': '''yue_Hant''', '''Chinese Simplified''': '''zho_Hans''', '''Chinese Traditional''': '''zho_Hant''', '''Standard Malay''': '''zsm_Latn''', '''Zulu''': '''zul_Latn''', } class __lowerCamelCase ( __lowercase ): __UpperCamelCase = 'facebook/nllb-200-distilled-600M' __UpperCamelCase = ( 'This is a tool that translates text from one language to another. It takes three inputs: `text`, which should ' 'be the text to translate, `src_lang`, which should be the language of the text to translate, and `tgt_lang`, ' 'which should be the desired output language. Both `src_lang` and `tgt_lang` are written in ' 'plain English, such as \'Romanian\' or \'Albanian\'. It returns the text translated in `tgt_lang`.' ) __UpperCamelCase = 'translator' __UpperCamelCase = AutoTokenizer __UpperCamelCase = AutoModelForSeqaSeqLM __UpperCamelCase = LANGUAGE_CODES __UpperCamelCase = ['text', 'text', 'text'] __UpperCamelCase = ['text'] def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if src_lang not in self.lang_to_code: raise ValueError(f"""{src_lang} is not a supported language.""" ) if tgt_lang not in self.lang_to_code: raise ValueError(f"""{tgt_lang} is not a supported language.""" ) _lowerCAmelCase = self.lang_to_code[src_lang] _lowerCAmelCase = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( lowerCamelCase , return_tensors="""pt""" , src_lang=lowerCamelCase , tgt_lang=lowerCamelCase ) def A__ (self , lowerCamelCase ): '''simple docstring''' return self.model.generate(**lowerCamelCase ) def A__ (self , lowerCamelCase ): '''simple docstring''' return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowerCamelCase )
317
1
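The translator tool above validates plain-English names against LANGUAGE_CODES and then runs an encode/generate/decode cycle. Stripped of the tool plumbing, roughly the same cycle with the tokenizer and model the sample names; note that `_build_translation_inputs` is the same private NLLB tokenizer helper the tool itself calls:

# Sketch: encode -> generate -> decode, as the tool does internally.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")

# "eng_Latn" and "ron_Latn" are the NLLB codes for English and Romanian.
inputs = tokenizer._build_translation_inputs(
    "Hello, world!", return_tensors="pt", src_lang="eng_Latn", tgt_lang="ron_Latn"
)
outputs = model.generate(**inputs)
print(tokenizer.decode(outputs[0].tolist(), skip_special_tokens=True))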
"""simple docstring""" class __lowerCamelCase : def __init__(self ): '''simple docstring''' _lowerCAmelCase = {} # Mapping from char to TrieNode _lowerCAmelCase = False def A__ (self , lowerCamelCase ): '''simple docstring''' for word in words: self.insert(lowerCamelCase ) def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self for char in word: if char not in curr.nodes: _lowerCAmelCase = TrieNode() _lowerCAmelCase = curr.nodes[char] _lowerCAmelCase = True def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self for char in word: if char not in curr.nodes: return False _lowerCAmelCase = curr.nodes[char] return curr.is_leaf def A__ (self , lowerCamelCase ): '''simple docstring''' def _delete(lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> bool: if index == len(lowerCamelCase ): # If word does not exist if not curr.is_leaf: return False _lowerCAmelCase = False return len(curr.nodes ) == 0 _lowerCAmelCase = word[index] _lowerCAmelCase = curr.nodes.get(lowerCamelCase ) # If char not in current trie node if not char_node: return False # Flag to check if node can be deleted _lowerCAmelCase = _delete(lowerCamelCase , lowerCamelCase , index + 1 ) if delete_curr: del curr.nodes[char] return len(curr.nodes ) == 0 return delete_curr _delete(self , lowerCamelCase , 0 ) def __UpperCAmelCase ( snake_case_ : TrieNode , snake_case_ : str ) -> None: """simple docstring""" if node.is_leaf: print(snake_case_ , end=""" """ ) for key, value in node.nodes.items(): print_words(snake_case_ , word + key ) def __UpperCAmelCase ( ) -> bool: """simple docstring""" _lowerCAmelCase = """banana bananas bandana band apple all beast""".split() _lowerCAmelCase = TrieNode() root.insert_many(snake_case_ ) # print_words(root, "") assert all(root.find(snake_case_ ) for word in words ) assert root.find("""banana""" ) assert not root.find("""bandanas""" ) assert not root.find("""apps""" ) assert root.find("""apple""" ) assert root.find("""all""" ) root.delete("""all""" ) assert not root.find("""all""" ) root.delete("""banana""" ) assert not root.find("""banana""" ) assert root.find("""bananas""" ) return True def __UpperCAmelCase ( snake_case_ : str , snake_case_ : bool ) -> None: """simple docstring""" print(str(snake_case_ ) , """works!""" if passes else """doesn't work :(""" ) def __UpperCAmelCase ( ) -> None: """simple docstring""" assert test_trie() def __UpperCAmelCase ( ) -> None: """simple docstring""" print_results("""Testing trie functionality""" , test_trie() ) if __name__ == "__main__": main()
317
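Under the renamed identifiers the trie above is hard to follow, and its test block calls names (`TrieNode`, `insert_many`, `find`, `delete`) that the renaming removed from the class. A readable sketch of the same insert/find logic (deletion omitted for brevity):

# Minimal trie: each node maps a character to a child; is_leaf marks word ends.
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, "TrieNode"] = {}
        self.is_leaf = False

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            curr = curr.nodes.setdefault(char, TrieNode())
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

root = TrieNode()
for w in "banana bananas band".split():
    root.insert(w)
assert root.find("banana") and not root.find("ban")  # "ban" is only a prefix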
"""simple docstring""" from math import isqrt def __UpperCAmelCase ( snake_case_ : int ) -> list[int]: """simple docstring""" _lowerCAmelCase = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , snake_case_ , snake_case_ ): _lowerCAmelCase = False return [i for i in range(2 , snake_case_ ) if is_prime[i]] def __UpperCAmelCase ( snake_case_ : int = 10**8 ) -> int: """simple docstring""" _lowerCAmelCase = calculate_prime_numbers(max_number // 2 ) _lowerCAmelCase = 0 _lowerCAmelCase = 0 _lowerCAmelCase = len(snake_case_ ) - 1 while left <= right: while prime_numbers[left] * prime_numbers[right] >= max_number: right -= 1 semiprimes_count += right - left + 1 left += 1 return semiprimes_count if __name__ == "__main__": print(F'{solution() = }')
317
1
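The companion sample counts numbers below a bound that are products of two primes by pairing a sieve with a two-pointer scan over the sorted prime list. The counting step in isolation, with a small illustrative bound:

# Count pairs (p, q) of primes with p <= q and p * q < limit.
def count_semiprimes(primes: list[int], limit: int) -> int:
    count, left, right = 0, 0, len(primes) - 1
    while left <= right:
        # shrink the window until primes[left] * primes[right] fits under limit
        while left <= right and primes[left] * primes[right] >= limit:
            right -= 1
        count += right - left + 1  # every q in primes[left..right] works
        left += 1
    return count

assert count_semiprimes([2, 3, 5, 7], 30) == 8  # 4, 6, 9, 10, 14, 15, 21, 25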
"""simple docstring""" import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def __UpperCAmelCase ( snake_case_ : Optional[Any] ) -> List[str]: """simple docstring""" _lowerCAmelCase = SwinConfig() _lowerCAmelCase = swin_name.split("""_""" ) _lowerCAmelCase = name_split[1] _lowerCAmelCase = int(name_split[4] ) _lowerCAmelCase = int(name_split[3][-1] ) if model_size == "tiny": _lowerCAmelCase = 96 _lowerCAmelCase = (2, 2, 6, 2) _lowerCAmelCase = (3, 6, 12, 24) elif model_size == "small": _lowerCAmelCase = 96 _lowerCAmelCase = (2, 2, 18, 2) _lowerCAmelCase = (3, 6, 12, 24) elif model_size == "base": _lowerCAmelCase = 128 _lowerCAmelCase = (2, 2, 18, 2) _lowerCAmelCase = (4, 8, 16, 32) else: _lowerCAmelCase = 192 _lowerCAmelCase = (2, 2, 18, 2) _lowerCAmelCase = (6, 12, 24, 48) if "in22k" in swin_name: _lowerCAmelCase = 21841 else: _lowerCAmelCase = 1000 _lowerCAmelCase = """huggingface/label-files""" _lowerCAmelCase = """imagenet-1k-id2label.json""" _lowerCAmelCase = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="""dataset""" ) , """r""" ) ) _lowerCAmelCase = {int(snake_case_ ): v for k, v in idalabel.items()} _lowerCAmelCase = idalabel _lowerCAmelCase = {v: k for k, v in idalabel.items()} _lowerCAmelCase = img_size _lowerCAmelCase = num_classes _lowerCAmelCase = embed_dim _lowerCAmelCase = depths _lowerCAmelCase = num_heads _lowerCAmelCase = window_size return config def __UpperCAmelCase ( snake_case_ : Optional[Any] ) -> Dict: """simple docstring""" if "patch_embed.proj" in name: _lowerCAmelCase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: _lowerCAmelCase = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if "layers" in name: _lowerCAmelCase = """encoder.""" + name if "attn.proj" in name: _lowerCAmelCase = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: _lowerCAmelCase = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: _lowerCAmelCase = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: _lowerCAmelCase = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: _lowerCAmelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: _lowerCAmelCase = name.replace("""mlp.fc2""" , """output.dense""" ) if name == "norm.weight": _lowerCAmelCase = """layernorm.weight""" if name == "norm.bias": _lowerCAmelCase = """layernorm.bias""" if "head" in name: _lowerCAmelCase = name.replace("""head""" , """classifier""" ) else: _lowerCAmelCase = """swin.""" + name return name def __UpperCAmelCase ( snake_case_ : List[Any] , snake_case_ : Optional[int] ) -> Union[str, Any]: """simple docstring""" for key in orig_state_dict.copy().keys(): _lowerCAmelCase = orig_state_dict.pop(snake_case_ ) if "mask" in key: continue elif "qkv" in key: _lowerCAmelCase = key.split(""".""" ) _lowerCAmelCase = int(key_split[1] ) _lowerCAmelCase = int(key_split[3] ) _lowerCAmelCase = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: _lowerCAmelCase = val[:dim, :] _lowerCAmelCase = val[ dim : dim * 2, : ] _lowerCAmelCase = val[-dim:, :] else: _lowerCAmelCase = val[ :dim ] _lowerCAmelCase = val[ dim : dim * 2 ] _lowerCAmelCase = val[ -dim: ] else: _lowerCAmelCase = val return orig_state_dict def 
__UpperCAmelCase ( snake_case_ : str , snake_case_ : Optional[Any] ) -> Tuple: """simple docstring""" _lowerCAmelCase = timm.create_model(snake_case_ , pretrained=snake_case_ ) timm_model.eval() _lowerCAmelCase = get_swin_config(snake_case_ ) _lowerCAmelCase = SwinForImageClassification(snake_case_ ) model.eval() _lowerCAmelCase = convert_state_dict(timm_model.state_dict() , snake_case_ ) model.load_state_dict(snake_case_ ) _lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _lowerCAmelCase = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) ) _lowerCAmelCase = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) _lowerCAmelCase = image_processor(images=snake_case_ , return_tensors="""pt""" ) _lowerCAmelCase = timm_model(inputs["""pixel_values"""] ) _lowerCAmelCase = model(**snake_case_ ).logits assert torch.allclose(snake_case_ , snake_case_ , atol=1e-3 ) print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(snake_case_ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(snake_case_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swin_name''', default='''swin_tiny_patch4_window7_224''', type=str, help='''Name of the Swin timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) SCREAMING_SNAKE_CASE : Dict = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
317
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class __lowerCamelCase ( __lowercase ): __UpperCamelCase = ( 'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.' 'It takes two arguments named `image` which should be the original image, and `label` which should be a text ' 'describing the elements what should be identified in the segmentation mask. The tool returns the mask.' ) __UpperCamelCase = 'CIDAS/clipseg-rd64-refined' __UpperCamelCase = 'image_segmenter' __UpperCamelCase = CLIPSegForImageSegmentation __UpperCamelCase = ['image', 'text'] __UpperCamelCase = ['image'] def __init__(self , *lowerCamelCase , **lowerCamelCase ): '''simple docstring''' requires_backends(self , ["""vision"""] ) super().__init__(*lowerCamelCase , **lowerCamelCase ) def A__ (self , lowerCamelCase , lowerCamelCase ): '''simple docstring''' return self.pre_processor(text=[label] , images=[image] , padding=lowerCamelCase , return_tensors="""pt""" ) def A__ (self , lowerCamelCase ): '''simple docstring''' with torch.no_grad(): _lowerCAmelCase = self.model(**lowerCamelCase ).logits return logits def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = outputs.cpu().detach().numpy() _lowerCAmelCase = 0 _lowerCAmelCase = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
317
1
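The heart of the Swin conversion above is splitting timm's fused qkv projection into the separate query/key/value tensors that the HF layout expects; that one step, with an illustrative head dimension:

# Split a fused qkv weight of shape (3 * dim, dim) into q, k and v.
import torch

dim = 96  # illustrative; the script reads this from the attention layer
qkv_weight = torch.randn(3 * dim, dim)  # stand-in for timm's attn.qkv.weight

q = qkv_weight[:dim, :]
k = qkv_weight[dim : dim * 2, :]
v = qkv_weight[-dim:, :]
assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)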
"""simple docstring""" import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets SCREAMING_SNAKE_CASE : Optional[Any] = datasets.logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Union[str, Any] = '''\ @inproceedings{bleurt, title={BLEURT: Learning Robust Metrics for Text Generation}, author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh}, booktitle={ACL}, year={2020}, url={https://arxiv.org/abs/2004.04696} } ''' SCREAMING_SNAKE_CASE : Optional[Any] = '''\ BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune it for your specific application (the latter is expected to perform better). See the project\'s README at https://github.com/google-research/bleurt#readme for more information. ''' SCREAMING_SNAKE_CASE : Dict = ''' BLEURT score. Args: `predictions` (list of str): prediction/candidate sentences `references` (list of str): reference sentences `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None. Returns: \'scores\': List of scores. Examples: >>> predictions = ["hello there", "general kenobi"] >>> references = ["hello there", "general kenobi"] >>> bleurt = datasets.load_metric("bleurt") >>> results = bleurt.compute(predictions=predictions, references=references) >>> print([round(v, 2) for v in results["scores"]]) [1.03, 1.04] ''' SCREAMING_SNAKE_CASE : Any = { '''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''', '''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''', '''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''', '''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''', '''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''', '''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''', '''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''', '''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''', '''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''', '''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''', } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCamelCase ( datasets.Metric ): def A__ (self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , ) def A__ (self , lowerCamelCase ): '''simple docstring''' if self.config_name == "default": logger.warning( """Using default BLEURT-Base checkpoint for sequence maximum length 128. 
""" """You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" ) _lowerCAmelCase = """bleurt-base-128""" if self.config_name.lower() in CHECKPOINT_URLS: _lowerCAmelCase = self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: _lowerCAmelCase = self.config_name.upper() else: raise KeyError( f"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" ) # download the model checkpoint specified by self.config_name and set up the scorer _lowerCAmelCase = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] ) _lowerCAmelCase = score.BleurtScorer(os.path.join(lowerCamelCase , lowerCamelCase ) ) def A__ (self , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.scorer.score(references=lowerCamelCase , candidates=lowerCamelCase ) return {"scores": scores}
317
"""simple docstring""" from __future__ import annotations import queue class __lowerCamelCase : def __init__(self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = data _lowerCAmelCase = None _lowerCAmelCase = None def __UpperCAmelCase ( ) -> TreeNode: """simple docstring""" print("""\n********Press N to stop entering at any point of time********\n""" ) _lowerCAmelCase = input("""Enter the value of the root node: """ ).strip().lower() _lowerCAmelCase = queue.Queue() _lowerCAmelCase = TreeNode(int(snake_case_ ) ) q.put(snake_case_ ) while not q.empty(): _lowerCAmelCase = q.get() _lowerCAmelCase = F"""Enter the left node of {node_found.data}: """ _lowerCAmelCase = input(snake_case_ ).strip().lower() or """n""" if check == "n": return tree_node _lowerCAmelCase = TreeNode(int(snake_case_ ) ) _lowerCAmelCase = left_node q.put(snake_case_ ) _lowerCAmelCase = F"""Enter the right node of {node_found.data}: """ _lowerCAmelCase = input(snake_case_ ).strip().lower() or """n""" if check == "n": return tree_node _lowerCAmelCase = TreeNode(int(snake_case_ ) ) _lowerCAmelCase = right_node q.put(snake_case_ ) raise def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return print(node.data , end=""",""" ) pre_order(node.left ) pre_order(node.right ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return in_order(node.left ) print(node.data , end=""",""" ) in_order(node.right ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=""",""" ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase = queue.Queue() q.put(snake_case_ ) while not q.empty(): _lowerCAmelCase = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase = queue.Queue() q.put(snake_case_ ) while not q.empty(): _lowerCAmelCase = [] while not q.empty(): _lowerCAmelCase = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(snake_case_ ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase = [] _lowerCAmelCase = node while n or stack: while n: # start from root node, find its left child print(n.data , end=""",""" ) stack.append(snake_case_ ) _lowerCAmelCase = n.left # end of while means current node doesn't have left child _lowerCAmelCase = stack.pop() # start to traverse its right child _lowerCAmelCase = n.right def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase = [] _lowerCAmelCase = node while n or stack: while n: stack.append(snake_case_ ) _lowerCAmelCase = n.left _lowerCAmelCase = stack.pop() print(n.data , end=""",""" ) _lowerCAmelCase = n.right def __UpperCAmelCase ( 
snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase , _lowerCAmelCase = [], [] _lowerCAmelCase = node stacka.append(snake_case_ ) while stacka: # to find the reversed order of post order, store it in stack2 _lowerCAmelCase = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(snake_case_ ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=""",""" ) def __UpperCAmelCase ( snake_case_ : str = "" , snake_case_ : int=50 , snake_case_ : Dict="*" ) -> str: """simple docstring""" if not s: return "\n" + width * char _lowerCAmelCase , _lowerCAmelCase = divmod(width - len(snake_case_ ) - 2 , 2 ) return F"""{left * char} {s} {(left + extra) * char}""" if __name__ == "__main__": import doctest doctest.testmod() print(prompt('''Binary Tree Traversals''')) SCREAMING_SNAKE_CASE : TreeNode = build_tree() print(prompt('''Pre Order Traversal''')) pre_order(node) print(prompt() + '''\n''') print(prompt('''In Order Traversal''')) in_order(node) print(prompt() + '''\n''') print(prompt('''Post Order Traversal''')) post_order(node) print(prompt() + '''\n''') print(prompt('''Level Order Traversal''')) level_order(node) print(prompt() + '''\n''') print(prompt('''Actual Level Order Traversal''')) level_order_actual(node) print('''*''' * 5_0 + '''\n''') print(prompt('''Pre Order Traversal - Iteration Version''')) pre_order_iter(node) print(prompt() + '''\n''') print(prompt('''In Order Traversal - Iteration Version''')) in_order_iter(node) print(prompt() + '''\n''') print(prompt('''Post Order Traversal - Iteration Version''')) post_order_iter(node) print(prompt())
317
1
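Of the traversals in the tree sample, the two-stack iterative post-order is the least obvious: the first stack produces a root-right-left order, and popping the second stack reverses it into left-right-root. A standalone sketch of the idea:

# Iterative post-order with two stacks.
class Node:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

def post_order_iter(root):
    out, stacka, stackb = [], [root], []
    while stacka:
        node = stacka.pop()
        stackb.append(node)
        if node.left:
            stacka.append(node.left)
        if node.right:
            stacka.append(node.right)
    while stackb:  # reversed root-right-left order is left-right-root
        out.append(stackb.pop().data)
    return out

assert post_order_iter(Node(1, Node(2), Node(3))) == [2, 3, 1]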
"""simple docstring""" import tensorflow as tf from ...tf_utils import shape_list class __lowerCamelCase ( tf.keras.layers.Layer ): def __init__(self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=1 , lowerCamelCase=False , **lowerCamelCase ): '''simple docstring''' super().__init__(**lowerCamelCase ) _lowerCAmelCase = vocab_size _lowerCAmelCase = d_embed _lowerCAmelCase = d_proj _lowerCAmelCase = cutoffs + [vocab_size] _lowerCAmelCase = [0] + self.cutoffs _lowerCAmelCase = div_val _lowerCAmelCase = self.cutoffs[0] _lowerCAmelCase = len(self.cutoffs ) - 1 _lowerCAmelCase = self.shortlist_size + self.n_clusters _lowerCAmelCase = keep_order _lowerCAmelCase = [] _lowerCAmelCase = [] def A__ (self , lowerCamelCase ): '''simple docstring''' if self.n_clusters > 0: _lowerCAmelCase = self.add_weight( shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=lowerCamelCase , name="""cluster_weight""" ) _lowerCAmelCase = self.add_weight( shape=(self.n_clusters,) , initializer="""zeros""" , trainable=lowerCamelCase , name="""cluster_bias""" ) if self.div_val == 1: for i in range(len(self.cutoffs ) ): if self.d_proj != self.d_embed: _lowerCAmelCase = self.add_weight( shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=lowerCamelCase , name=f"""out_projs_._{i}""" , ) self.out_projs.append(lowerCamelCase ) else: self.out_projs.append(lowerCamelCase ) _lowerCAmelCase = self.add_weight( shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=lowerCamelCase , name=f"""out_layers_._{i}_._weight""" , ) _lowerCAmelCase = self.add_weight( shape=(self.vocab_size,) , initializer="""zeros""" , trainable=lowerCamelCase , name=f"""out_layers_._{i}_._bias""" , ) self.out_layers.append((weight, bias) ) else: for i in range(len(self.cutoffs ) ): _lowerCAmelCase , _lowerCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1] _lowerCAmelCase = self.d_embed // (self.div_val**i) _lowerCAmelCase = self.add_weight( shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=lowerCamelCase , name=f"""out_projs_._{i}""" ) self.out_projs.append(lowerCamelCase ) _lowerCAmelCase = self.add_weight( shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=lowerCamelCase , name=f"""out_layers_._{i}_._weight""" , ) _lowerCAmelCase = self.add_weight( shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=lowerCamelCase , name=f"""out_layers_._{i}_._bias""" , ) self.out_layers.append((weight, bias) ) super().build(lowerCamelCase ) @staticmethod def A__ (lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None ): '''simple docstring''' _lowerCAmelCase = x if proj is not None: _lowerCAmelCase = tf.einsum("""ibd,ed->ibe""" , lowerCamelCase , lowerCamelCase ) return tf.einsum("""ibd,nd->ibn""" , lowerCamelCase , lowerCamelCase ) + b @staticmethod def A__ (lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = shape_list(lowerCamelCase ) _lowerCAmelCase = tf.range(lp_size[0] , dtype=target.dtype ) _lowerCAmelCase = tf.stack([r, target] , 1 ) return tf.gather_nd(lowerCamelCase , lowerCamelCase ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase=True , lowerCamelCase=False ): '''simple docstring''' _lowerCAmelCase = 0 if self.n_clusters == 0: _lowerCAmelCase = self._logit(lowerCamelCase , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] ) if target is not None: _lowerCAmelCase = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=lowerCamelCase , 
logits=lowerCamelCase ) _lowerCAmelCase = tf.nn.log_softmax(lowerCamelCase , axis=-1 ) else: _lowerCAmelCase = shape_list(lowerCamelCase ) _lowerCAmelCase = [] _lowerCAmelCase = tf.zeros(hidden_sizes[:2] ) for i in range(len(self.cutoffs ) ): _lowerCAmelCase , _lowerCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: _lowerCAmelCase = (target >= l_idx) & (target < r_idx) _lowerCAmelCase = tf.where(lowerCamelCase ) _lowerCAmelCase = tf.boolean_mask(lowerCamelCase , lowerCamelCase ) - l_idx if self.div_val == 1: _lowerCAmelCase = self.out_layers[0][0][l_idx:r_idx] _lowerCAmelCase = self.out_layers[0][1][l_idx:r_idx] else: _lowerCAmelCase = self.out_layers[i][0] _lowerCAmelCase = self.out_layers[i][1] if i == 0: _lowerCAmelCase = tf.concat([cur_W, self.cluster_weight] , 0 ) _lowerCAmelCase = tf.concat([cur_b, self.cluster_bias] , 0 ) _lowerCAmelCase = self._logit(lowerCamelCase , lowerCamelCase , lowerCamelCase , self.out_projs[0] ) _lowerCAmelCase = tf.nn.log_softmax(lowerCamelCase ) out.append(head_logprob[..., : self.cutoffs[0]] ) if target is not None: _lowerCAmelCase = tf.boolean_mask(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = self._gather_logprob(lowerCamelCase , lowerCamelCase ) else: _lowerCAmelCase = self._logit(lowerCamelCase , lowerCamelCase , lowerCamelCase , self.out_projs[i] ) _lowerCAmelCase = tf.nn.log_softmax(lowerCamelCase ) _lowerCAmelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster _lowerCAmelCase = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(lowerCamelCase ) if target is not None: _lowerCAmelCase = tf.boolean_mask(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = tf.boolean_mask(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = self._gather_logprob(lowerCamelCase , lowerCamelCase ) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(lowerCamelCase , -cur_logprob , shape_list(lowerCamelCase ) ) _lowerCAmelCase = tf.concat(lowerCamelCase , axis=-1 ) if target is not None: if return_mean: _lowerCAmelCase = tf.reduce_mean(lowerCamelCase ) # Add the training-time loss value to the layer using `self.add_loss()`. self.add_loss(lowerCamelCase ) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference. self.add_metric(lowerCamelCase , name=self.name , aggregation="""mean""" if return_mean else """""" ) return out
317
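The adaptive-softmax layer above routes every cluster through one `_logit` helper: an optional projection followed by a tied output layer, both expressed as einsums. In isolation, with illustrative shapes:

# Sketch of _logit: project hidden states (if needed), then compute vocab logits.
import tensorflow as tf

def logit(x, weight, bias, proj=None):
    if proj is not None:
        x = tf.einsum("ibd,ed->ibe", x, proj)  # project d_proj up to d_embed
    return tf.einsum("ibd,nd->ibn", x, weight) + bias  # (seq, batch, vocab)

x = tf.random.normal((5, 2, 8))      # (seq_len, batch, d_embed)
weight = tf.random.normal((100, 8))  # (vocab, d_embed)
bias = tf.zeros((100,))
print(logit(x, weight, bias).shape)  # (5, 2, 100)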
"""simple docstring""" from __future__ import annotations class __lowerCamelCase : def __init__(self , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase = text, pattern _lowerCAmelCase , _lowerCAmelCase = len(lowerCamelCase ), len(lowerCamelCase ) def A__ (self , lowerCamelCase ): '''simple docstring''' for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def A__ (self , lowerCamelCase ): '''simple docstring''' for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def A__ (self ): '''simple docstring''' _lowerCAmelCase = [] for i in range(self.textLen - self.patLen + 1 ): _lowerCAmelCase = self.mismatch_in_text(lowerCamelCase ) if mismatch_index == -1: positions.append(lowerCamelCase ) else: _lowerCAmelCase = self.match_in_pattern(self.text[mismatch_index] ) _lowerCAmelCase = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions SCREAMING_SNAKE_CASE : Any = '''ABAABA''' SCREAMING_SNAKE_CASE : Optional[int] = '''AB''' SCREAMING_SNAKE_CASE : str = BoyerMooreSearch(text, pattern) SCREAMING_SNAKE_CASE : Tuple = bms.bad_character_heuristic() if len(positions) == 0: print('''No match found''') else: print('''Pattern found in following positions: ''') print(positions)
317
1
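The search class above carries Boyer-Moore's bad-character bookkeeping, but (as the leftover `lgtm` lint comment hints) the computed shift is never used to advance the scan, so the method effectively checks every alignment right to left. A simplified sketch of what it computes:

# Right-to-left comparison at every alignment; records full-match positions.
def find_occurrences(text: str, pattern: str) -> list[int]:
    positions = []
    m = len(pattern)
    for start in range(len(text) - m + 1):
        for i in range(m - 1, -1, -1):  # compare from the pattern's right end
            if pattern[i] != text[start + i]:
                break
        else:  # no mismatch found: the pattern occurs at this offset
            positions.append(start)
    return positions

assert find_occurrences("ABAABA", "AB") == [0, 3]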
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def __UpperCAmelCase ( snake_case_ : Features ) -> Optional[int]: """simple docstring""" _lowerCAmelCase = np.inf def set_batch_size(snake_case_ : FeatureType ) -> None: nonlocal batch_size if isinstance(snake_case_ , snake_case_ ): _lowerCAmelCase = min(snake_case_ , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(snake_case_ , snake_case_ ): _lowerCAmelCase = min(snake_case_ , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(snake_case_ , snake_case_ ) and feature.dtype == "binary": _lowerCAmelCase = min(snake_case_ , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(snake_case_ , snake_case_ ) return None if batch_size is np.inf else batch_size class __lowerCamelCase ( __lowercase ): def __init__(self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = None , **lowerCamelCase , ): '''simple docstring''' super().__init__( lowerCamelCase , split=lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase , streaming=lowerCamelCase , num_proc=lowerCamelCase , **lowerCamelCase , ) _lowerCAmelCase = path_or_paths if isinstance(lowerCamelCase , lowerCamelCase ) else {self.split: path_or_paths} _lowerCAmelCase = _PACKAGED_DATASETS_MODULES["""parquet"""][1] _lowerCAmelCase = Parquet( cache_dir=lowerCamelCase , data_files=lowerCamelCase , features=lowerCamelCase , hash=lowerCamelCase , **lowerCamelCase , ) def A__ (self ): '''simple docstring''' if self.streaming: _lowerCAmelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None self.builder.download_and_prepare( download_config=lowerCamelCase , download_mode=lowerCamelCase , verification_mode=lowerCamelCase , base_path=lowerCamelCase , num_proc=self.num_proc , ) _lowerCAmelCase = self.builder.as_dataset( split=self.split , verification_mode=lowerCamelCase , in_memory=self.keep_in_memory ) return dataset class __lowerCamelCase : def __init__(self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ): '''simple docstring''' _lowerCAmelCase = dataset _lowerCAmelCase = path_or_buf _lowerCAmelCase = batch_size or get_writer_batch_size(dataset.features ) _lowerCAmelCase = parquet_writer_kwargs def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , """wb+""" ) as buffer: _lowerCAmelCase = self._write(file_obj=lowerCamelCase , batch_size=lowerCamelCase , **self.parquet_writer_kwargs ) else: _lowerCAmelCase = self._write(file_obj=self.path_or_buf , batch_size=lowerCamelCase , **self.parquet_writer_kwargs ) return written def A__ (self , lowerCamelCase , lowerCamelCase , **lowerCamelCase ): '''simple docstring''' 
_lowerCAmelCase = 0 _lowerCAmelCase = parquet_writer_kwargs.pop("""path_or_buf""" , lowerCamelCase ) _lowerCAmelCase = self.dataset.features.arrow_schema _lowerCAmelCase = pq.ParquetWriter(lowerCamelCase , schema=lowerCamelCase , **lowerCamelCase ) for offset in logging.tqdm( range(0 , len(self.dataset ) , lowerCamelCase ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating parquet from Arrow format""" , ): _lowerCAmelCase = query_table( table=self.dataset._data , key=slice(lowerCamelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(lowerCamelCase ) written += batch.nbytes writer.close() return written
317
"""simple docstring""" import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device SCREAMING_SNAKE_CASE : List[str] = False class __lowerCamelCase ( unittest.TestCase ): pass @slow @require_torch_gpu class __lowerCamelCase ( unittest.TestCase ): def A__ (self ): '''simple docstring''' _lowerCAmelCase = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) _lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe( image=lowerCamelCase , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images _lowerCAmelCase = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _lowerCAmelCase = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
317
1
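The `_write` loop above slices an Arrow-backed dataset into row batches and streams them through a single `pq.ParquetWriter`. Reduced to plain pyarrow, with illustrative data:

# Batched Parquet writing: one writer, one write_table call per slice.
import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({"text": [f"row {i}" for i in range(10)]})
batch_size = 4

writer = pq.ParquetWriter("out.parquet", schema=table.schema)
for offset in range(0, table.num_rows, batch_size):
    writer.write_table(table.slice(offset, batch_size))  # final slice is shorter
writer.close()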
"""simple docstring""" import os import sys SCREAMING_SNAKE_CASE : Dict = os.path.join(os.path.dirname(__file__), '''src''') sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) SCREAMING_SNAKE_CASE : Dict = [ '''torch''', '''numpy''', '''tokenizers''', '''filelock''', '''requests''', '''tqdm''', '''regex''', '''sentencepiece''', '''sacremoses''', '''importlib_metadata''', '''huggingface_hub''', ] @add_start_docstrings(AutoConfig.__doc__ ) def __UpperCAmelCase ( *snake_case_ : List[str] , **snake_case_ : List[str] ) -> Optional[int]: """simple docstring""" return AutoConfig.from_pretrained(*snake_case_ , **snake_case_ ) @add_start_docstrings(AutoTokenizer.__doc__ ) def __UpperCAmelCase ( *snake_case_ : Any , **snake_case_ : Optional[Any] ) -> Dict: """simple docstring""" return AutoTokenizer.from_pretrained(*snake_case_ , **snake_case_ ) @add_start_docstrings(AutoModel.__doc__ ) def __UpperCAmelCase ( *snake_case_ : Optional[Any] , **snake_case_ : Dict ) -> Dict: """simple docstring""" return AutoModel.from_pretrained(*snake_case_ , **snake_case_ ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def __UpperCAmelCase ( *snake_case_ : Union[str, Any] , **snake_case_ : str ) -> List[str]: """simple docstring""" return AutoModelForCausalLM.from_pretrained(*snake_case_ , **snake_case_ ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def __UpperCAmelCase ( *snake_case_ : Optional[int] , **snake_case_ : List[str] ) -> List[Any]: """simple docstring""" return AutoModelForMaskedLM.from_pretrained(*snake_case_ , **snake_case_ ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def __UpperCAmelCase ( *snake_case_ : Tuple , **snake_case_ : Union[str, Any] ) -> Optional[int]: """simple docstring""" return AutoModelForSequenceClassification.from_pretrained(*snake_case_ , **snake_case_ ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def __UpperCAmelCase ( *snake_case_ : Dict , **snake_case_ : List[str] ) -> Optional[Any]: """simple docstring""" return AutoModelForQuestionAnswering.from_pretrained(*snake_case_ , **snake_case_ )
317
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __lowerCamelCase ( __lowercase , unittest.TestCase ): __UpperCamelCase = DiTPipeline __UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS __UpperCamelCase = PipelineTesterMixin.required_optional_params - { 'latents', 'num_images_per_prompt', 'callback', 'callback_steps', } __UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS __UpperCamelCase = False def A__ (self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCamelCase , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1_000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=lowerCamelCase , ) _lowerCAmelCase = AutoencoderKL() _lowerCAmelCase = DDIMScheduler() _lowerCAmelCase = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler} return components def A__ (self , lowerCamelCase , lowerCamelCase=0 ): '''simple docstring''' if str(lowerCamelCase ).startswith("""mps""" ): _lowerCAmelCase = torch.manual_seed(lowerCamelCase ) else: _lowerCAmelCase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) _lowerCAmelCase = { """class_labels""": [1], """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def A__ (self ): '''simple docstring''' _lowerCAmelCase = """cpu""" _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = self.pipeline_class(**lowerCamelCase ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) _lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase ) _lowerCAmelCase = pipe(**lowerCamelCase ).images _lowerCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _lowerCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) _lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCamelCase , 1e-3 ) def A__ (self ): '''simple docstring''' self._test_inference_batch_single_identical(relax_max_difference=lowerCamelCase , expected_max_diff=1e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def A__ (self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @require_torch_gpu @slow class __lowerCamelCase ( unittest.TestCase ): def A__ (self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ (self ): '''simple docstring''' _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" ) pipe.to("""cuda""" ) _lowerCAmelCase = ["""vase""", """umbrella""", """white shark""", """white wolf"""] _lowerCAmelCase = pipe.get_label_ids(lowerCamelCase ) 
_lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=40 , output_type="""np""" ).images for word, image in zip(lowerCamelCase , lowerCamelCase ): _lowerCAmelCase = load_numpy( f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" ) assert np.abs((expected_image - image).max() ) < 1e-2 def A__ (self ): '''simple docstring''' _lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" ) _lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("""cuda""" ) _lowerCAmelCase = ["""vase""", """umbrella"""] _lowerCAmelCase = pipe.get_label_ids(lowerCamelCase ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=25 , output_type="""np""" ).images for word, image in zip(lowerCamelCase , lowerCamelCase ): _lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" f"""/dit/{word}_512.npy""" ) assert np.abs((expected_image - image).max() ) < 1e-1
317
1
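The slow tests above boil down to straightforward class-conditional use of `DiTPipeline`: map ImageNet label names to class ids, then sample. Pared to the essentials (GPU assumed, as in the tests):

# Class-conditional image generation with DiT.
import torch
from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256").to("cuda")
class_ids = pipe.get_label_ids(["vase", "umbrella"])  # ImageNet names -> ids
generator = torch.manual_seed(0)
images = pipe(class_ids, generator=generator, num_inference_steps=25).images
images[0].save("vase.png")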
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class __lowerCamelCase ( unittest.TestCase ): def A__ (self ): '''simple docstring''' _lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCamelCase ) _lowerCAmelCase = model.generate(lowerCamelCase , max_new_tokens=10 , do_sample=lowerCamelCase ) _lowerCAmelCase = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _lowerCAmelCase = TextStreamer(lowerCamelCase ) model.generate(lowerCamelCase , max_new_tokens=10 , do_sample=lowerCamelCase , streamer=lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCAmelCase = cs.out[:-1] self.assertEqual(lowerCamelCase , lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCamelCase ) _lowerCAmelCase = model.generate(lowerCamelCase , max_new_tokens=10 , do_sample=lowerCamelCase ) _lowerCAmelCase = tokenizer.decode(greedy_ids[0] ) _lowerCAmelCase = TextIteratorStreamer(lowerCamelCase ) _lowerCAmelCase = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} _lowerCAmelCase = Thread(target=model.generate , kwargs=lowerCamelCase ) thread.start() _lowerCAmelCase = """""" for new_text in streamer: streamer_text += new_text self.assertEqual(lowerCamelCase , lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCamelCase ) _lowerCAmelCase = model.generate(lowerCamelCase , max_new_tokens=10 , do_sample=lowerCamelCase ) _lowerCAmelCase = greedy_ids[:, input_ids.shape[1] :] _lowerCAmelCase = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _lowerCAmelCase = TextStreamer(lowerCamelCase , skip_prompt=lowerCamelCase ) model.generate(lowerCamelCase , max_new_tokens=10 , do_sample=lowerCamelCase , streamer=lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCAmelCase = cs.out[:-1] self.assertEqual(lowerCamelCase , lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = AutoTokenizer.from_pretrained("""distilgpt2""" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(lowerCamelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = torch.ones((1, 5) , device=lowerCamelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: 
_lowerCAmelCase = TextStreamer(lowerCamelCase , skip_special_tokens=lowerCamelCase ) model.generate(lowerCamelCase , max_new_tokens=1 , do_sample=lowerCamelCase , streamer=lowerCamelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _lowerCAmelCase = cs.out[:-1] # Remove the final "\n" _lowerCAmelCase = tokenizer(lowerCamelCase , return_tensors="""pt""" ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCamelCase ) _lowerCAmelCase = TextIteratorStreamer(lowerCamelCase , timeout=0.001 ) _lowerCAmelCase = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} _lowerCAmelCase = Thread(target=model.generate , kwargs=lowerCamelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(lowerCamelCase ): _lowerCAmelCase = """""" for new_text in streamer: streamer_text += new_text
317
"""simple docstring""" from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> Dict: """simple docstring""" return getitem, k def __UpperCAmelCase ( snake_case_ : Dict , snake_case_ : Union[str, Any] ) -> List[Any]: """simple docstring""" return setitem, k, v def __UpperCAmelCase ( snake_case_ : str ) -> Optional[int]: """simple docstring""" return delitem, k def __UpperCAmelCase ( snake_case_ : Optional[Any] , snake_case_ : Tuple , *snake_case_ : Tuple ) -> str: """simple docstring""" try: return fun(snake_case_ , *snake_case_ ), None except Exception as e: return None, e SCREAMING_SNAKE_CASE : int = ( _set('''key_a''', '''val_a'''), _set('''key_b''', '''val_b'''), ) SCREAMING_SNAKE_CASE : List[Any] = [ _set('''key_a''', '''val_a'''), _set('''key_a''', '''val_b'''), ] SCREAMING_SNAKE_CASE : Any = [ _set('''key_a''', '''val_a'''), _set('''key_b''', '''val_b'''), _del('''key_a'''), _del('''key_b'''), _set('''key_a''', '''val_a'''), _del('''key_a'''), ] SCREAMING_SNAKE_CASE : Union[str, Any] = [ _get('''key_a'''), _del('''key_a'''), _set('''key_a''', '''val_a'''), _del('''key_a'''), _del('''key_a'''), _get('''key_a'''), ] SCREAMING_SNAKE_CASE : Optional[Any] = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] SCREAMING_SNAKE_CASE : Optional[int] = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set('''key_a''', '''val_b'''), ] @pytest.mark.parametrize( """operations""" , ( pytest.param(_add_items , id="""add items""" ), pytest.param(_overwrite_items , id="""overwrite items""" ), pytest.param(_delete_items , id="""delete items""" ), pytest.param(_access_absent_items , id="""access absent items""" ), pytest.param(_add_with_resize_up , id="""add with resize up""" ), pytest.param(_add_with_resize_down , id="""add with resize down""" ), ) , ) def __UpperCAmelCase ( snake_case_ : List[Any] ) -> Tuple: """simple docstring""" _lowerCAmelCase = HashMap(initial_block_size=4 ) _lowerCAmelCase = {} for _, (fun, *args) in enumerate(snake_case_ ): _lowerCAmelCase , _lowerCAmelCase = _run_operation(snake_case_ , snake_case_ , *snake_case_ ) _lowerCAmelCase , _lowerCAmelCase = _run_operation(snake_case_ , snake_case_ , *snake_case_ ) assert my_res == py_res assert str(snake_case_ ) == str(snake_case_ ) assert set(snake_case_ ) == set(snake_case_ ) assert len(snake_case_ ) == len(snake_case_ ) assert set(my.items() ) == set(py.items() ) def __UpperCAmelCase ( ) -> Tuple: """simple docstring""" def is_public(snake_case_ : str ) -> bool: return not name.startswith("""_""" ) _lowerCAmelCase = {name for name in dir({} ) if is_public(snake_case_ )} _lowerCAmelCase = {name for name in dir(HashMap() ) if is_public(snake_case_ )} assert dict_public_names > hash_public_names
317
1
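A minimal usage sketch of the iterator-based streaming pattern the tests above exercise; the checkpoint name mirrors the tiny test model used there, and the prompt text is a made-up placeholder.

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello", return_tensors="pt")

# generate() runs in a background thread; the streamer yields decoded text chunks.
streamer = TextIteratorStreamer(tokenizer)
thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer})
thread.start()
for chunk in streamer:
    print(chunk, end="")
thread.join()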
"""simple docstring""" import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html SCREAMING_SNAKE_CASE : Any = '''platform''' import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def __UpperCAmelCase ( snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Optional[Any]=None , snake_case_ : Optional[Any]=None , snake_case_ : Optional[Any]=None , snake_case_ : List[Any]=None , snake_case_ : Optional[Any]=None , snake_case_ : Tuple=None , ) -> Optional[int]: """simple docstring""" if attention_mask is None: _lowerCAmelCase = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: _lowerCAmelCase = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: _lowerCAmelCase = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _lowerCAmelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _lowerCAmelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class __lowerCamelCase : def __init__(self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=99 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=4 , lowerCamelCase=4 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=32 , lowerCamelCase=2 , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=0.02 , ): '''simple docstring''' _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = seq_length _lowerCAmelCase = is_training _lowerCAmelCase = use_labels _lowerCAmelCase = vocab_size _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = eos_token_id _lowerCAmelCase = pad_token_id _lowerCAmelCase = bos_token_id _lowerCAmelCase = initializer_range def A__ (self ): '''simple docstring''' _lowerCAmelCase = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) _lowerCAmelCase = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) _lowerCAmelCase = shift_tokens_right(lowerCamelCase , 1 , 2 ) _lowerCAmelCase = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , 
decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowerCamelCase , ) _lowerCAmelCase = prepare_blenderbot_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return config, inputs_dict def A__ (self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase = self.prepare_config_and_inputs() return config, inputs_dict def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = 20 _lowerCAmelCase = model_class_name(lowerCamelCase ) _lowerCAmelCase = model.encode(inputs_dict["""input_ids"""] ) _lowerCAmelCase , _lowerCAmelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _lowerCAmelCase = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) _lowerCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _lowerCAmelCase = model.decode( decoder_input_ids[:, :-1] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , decoder_position_ids=lowerCamelCase , ) _lowerCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _lowerCAmelCase = model.decode( decoder_input_ids[:, -1:] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCamelCase , ) _lowerCAmelCase = model.decode(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = 20 _lowerCAmelCase = model_class_name(lowerCamelCase ) _lowerCAmelCase = model.encode(inputs_dict["""input_ids"""] ) _lowerCAmelCase , _lowerCAmelCase = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _lowerCAmelCase = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _lowerCAmelCase = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _lowerCAmelCase = model.decode( decoder_input_ids[:, :-1] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , decoder_position_ids=lowerCamelCase , ) _lowerCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _lowerCAmelCase = model.decode( decoder_input_ids[:, -1:] , lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCamelCase , decoder_position_ids=lowerCamelCase , ) _lowerCAmelCase = model.decode(lowerCamelCase , lowerCamelCase , decoder_attention_mask=lowerCamelCase ) _lowerCAmelCase 
= np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) @require_flax class __lowerCamelCase ( unittest.TestCase ): __UpperCamelCase = 99 def A__ (self ): '''simple docstring''' _lowerCAmelCase = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) _lowerCAmelCase = input_ids.shape[0] _lowerCAmelCase = BlenderbotConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def A__ (self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self._get_config_and_data() _lowerCAmelCase = FlaxBlenderbotForConditionalGeneration(lowerCamelCase ) _lowerCAmelCase = lm_model(input_ids=lowerCamelCase ) _lowerCAmelCase = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["""logits"""].shape , lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = BlenderbotConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) _lowerCAmelCase = FlaxBlenderbotForConditionalGeneration(lowerCamelCase ) _lowerCAmelCase = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) _lowerCAmelCase = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) _lowerCAmelCase = lm_model(input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase ) _lowerCAmelCase = (*summary.shape, config.vocab_size) self.assertEqual(outputs["""logits"""].shape , lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) _lowerCAmelCase = shift_tokens_right(lowerCamelCase , 1 , 2 ) _lowerCAmelCase = np.equal(lowerCamelCase , 1 ).astype(np.floataa ).sum() _lowerCAmelCase = np.equal(lowerCamelCase , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(lowerCamelCase , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class __lowerCamelCase ( __lowercase , unittest.TestCase , __lowercase ): __UpperCamelCase = True __UpperCamelCase = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) __UpperCamelCase = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def A__ (self ): '''simple docstring''' _lowerCAmelCase = FlaxBlenderbotModelTester(self ) def A__ (self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() for model_class in 
self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _lowerCAmelCase = self._prepare_for_class(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = model_class(lowerCamelCase ) @jax.jit def encode_jitted(lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ): return model.encode(input_ids=lowerCamelCase , attention_mask=lowerCamelCase ) with self.subTest("""JIT Enabled""" ): _lowerCAmelCase = encode_jitted(**lowerCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _lowerCAmelCase = encode_jitted(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) ) for jitted_output, output in zip(lowerCamelCase , lowerCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) def A__ (self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _lowerCAmelCase = model_class(lowerCamelCase ) _lowerCAmelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) _lowerCAmelCase = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(lowerCamelCase , lowerCamelCase , lowerCamelCase ): return model.decode( decoder_input_ids=lowerCamelCase , decoder_attention_mask=lowerCamelCase , encoder_outputs=lowerCamelCase , ) with self.subTest("""JIT Enabled""" ): _lowerCAmelCase = decode_jitted(**lowerCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _lowerCAmelCase = decode_jitted(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) ) for jitted_output, output in zip(lowerCamelCase , lowerCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def A__ (self ): '''simple docstring''' for model_class_name in self.all_model_classes: _lowerCAmelCase = model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""" ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids _lowerCAmelCase = np.ones((1, 1) ) * model.config.eos_token_id _lowerCAmelCase = model(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) @unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""" ) @slow def A__ (self ): '''simple docstring''' _lowerCAmelCase = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 15, """max_length""": 25} _lowerCAmelCase = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True} _lowerCAmelCase = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""" , from_pt=lowerCamelCase ) _lowerCAmelCase = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""" ) _lowerCAmelCase = ["""Sam"""] _lowerCAmelCase = tokenizer(lowerCamelCase , return_tensors="""jax""" ) _lowerCAmelCase = model.generate(**lowerCamelCase , **lowerCamelCase ) _lowerCAmelCase = """Sam is a great name. It means \"sun\" in Gaelic.""" _lowerCAmelCase = tokenizer.batch_decode(lowerCamelCase , **lowerCamelCase ) assert generated_txt[0].strip() == tgt_text
317
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int: """simple docstring""" def count_of_possible_combinations(snake_case_ : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(snake_case_ ) def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int: """simple docstring""" def count_of_possible_combinations_with_dp_array( snake_case_ : int , snake_case_ : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] _lowerCAmelCase = sum( count_of_possible_combinations_with_dp_array(target - item , snake_case_ ) for item in array ) _lowerCAmelCase = answer return answer _lowerCAmelCase = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(snake_case_ , snake_case_ ) def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int: """simple docstring""" _lowerCAmelCase = [0] * (target + 1) _lowerCAmelCase = 1 for i in range(1 , target + 1 ): for j in range(snake_case_ ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE : Tuple = 3 SCREAMING_SNAKE_CASE : Any = 5 SCREAMING_SNAKE_CASE : Optional[int] = [1, 2, 5] print(combination_sum_iv(n, array, target))
317
1
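A hand-checked instance of the bottom-up variant above (combination sum IV, where order matters); this is a small re-implementation for verification, not the obfuscated function itself, and it uses the same inputs as the __main__ block.

def combination_sum_iv_check(array: list[int], target: int) -> int:
    dp = [0] * (target + 1)
    dp[0] = 1  # one way to reach 0: take nothing
    for i in range(1, target + 1):
        for item in array:
            if i - item >= 0:
                dp[i] += dp[i - item]
    return dp[target]

# With items [1, 2, 5] and target 5 there are 9 ordered sums:
# 1+1+1+1+1, four orderings of 2+1+1+1, three orderings of 2+2+1, and 5 itself.
assert combination_sum_iv_check([1, 2, 5], 5) == 9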
"""simple docstring""" import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : List[Any] = {'''vocab_file''': '''vocab.json'''} SCREAMING_SNAKE_CASE : int = { '''vocab_file''': { '''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''', } } SCREAMING_SNAKE_CASE : List[str] = {'''mgp-str''': 2_7} class __lowerCamelCase ( __lowercase ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__(self , lowerCamelCase , lowerCamelCase="[GO]" , lowerCamelCase="[GO]" , lowerCamelCase="[s]" , lowerCamelCase="[GO]" , **lowerCamelCase ): '''simple docstring''' super().__init__( unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , pad_token=lowerCamelCase , **lowerCamelCase , ) with open(lowerCamelCase , encoding="""utf-8""" ) as vocab_handle: _lowerCAmelCase = json.load(lowerCamelCase ) _lowerCAmelCase = {v: k for k, v in self.vocab.items()} @property def A__ (self ): '''simple docstring''' return len(self.vocab ) def A__ (self ): '''simple docstring''' return dict(self.vocab , **self.added_tokens_encoder ) def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = [] for s in text: char_tokens.extend(lowerCamelCase ) return char_tokens def A__ (self , lowerCamelCase ): '''simple docstring''' return self.vocab.get(lowerCamelCase , self.vocab.get(self.unk_token ) ) def A__ (self , lowerCamelCase ): '''simple docstring''' return self.decoder.get(lowerCamelCase ) def A__ (self , lowerCamelCase , lowerCamelCase = None ): '''simple docstring''' if not os.path.isdir(lowerCamelCase ): logger.error("""Vocabulary path ({}) should be a directory""".format(lowerCamelCase ) ) return _lowerCAmelCase = os.path.join( lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowerCamelCase , ensure_ascii=lowerCamelCase ) + """\n""" ) return (vocab_file,)
317
"""simple docstring""" from __future__ import annotations import string from itertools import cycle, product from pathlib import Path SCREAMING_SNAKE_CASE : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) SCREAMING_SNAKE_CASE : list[int] = [ord(letter) for letter in string.ascii_lowercase] SCREAMING_SNAKE_CASE : set[int] = {ord(char) for char in VALID_CHARS} SCREAMING_SNAKE_CASE : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def __UpperCAmelCase ( snake_case_ : list[int] , snake_case_ : tuple[int, ...] ) -> str | None: """simple docstring""" _lowerCAmelCase = "" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 for keychar, cipherchar in zip(cycle(snake_case_ ) , snake_case_ ): _lowerCAmelCase = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(snake_case_ ) return decoded def __UpperCAmelCase ( snake_case_ : list[int] ) -> list[str]: """simple docstring""" _lowerCAmelCase = [] for key in product(snake_case_ , repeat=3 ): _lowerCAmelCase = try_key(snake_case_ , snake_case_ ) if encoded is not None: possibles.append(snake_case_ ) return possibles def __UpperCAmelCase ( snake_case_ : list[str] , snake_case_ : str ) -> list[str]: """simple docstring""" return [possible for possible in possibles if common_word in possible.lower()] def __UpperCAmelCase ( snake_case_ : str = "p059_cipher.txt" ) -> int: """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = Path(snake_case_ ).parent.joinpath(snake_case_ ).read_text(encoding="""utf-8""" ) _lowerCAmelCase = [int(snake_case_ ) for number in data.strip().split(""",""" )] _lowerCAmelCase = filter_valid_chars(snake_case_ ) for common_word in COMMON_WORDS: _lowerCAmelCase = filter_common_word(snake_case_ , snake_case_ ) if len(snake_case_ ) == 1: break _lowerCAmelCase = possibles[0] return sum(ord(snake_case_ ) for char in decoded_text ) if __name__ == "__main__": print(F'{solution() = }')
317
1
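A tiny round-trip sketch of the repeating-key XOR scheme the Project Euler 59 solver above brute-forces; the key "god" and the message are made-up examples. XOR being its own inverse is what lets the same routine both encrypt and decrypt.

from itertools import cycle

def xor_with_key(data: list[int], key: str) -> list[int]:
    # XOR each byte with the corresponding letter of the repeating key
    return [byte ^ ord(k) for byte, k in zip(data, cycle(key))]

plaintext = "the quick brown fox"
cipher = xor_with_key([ord(c) for c in plaintext], "god")
decoded = "".join(chr(b) for b in xor_with_key(cipher, "god"))
assert decoded == plaintext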
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : int = '''▁''' SCREAMING_SNAKE_CASE : str = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} SCREAMING_SNAKE_CASE : List[str] = { '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}, '''tokenizer_file''': { '''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json''' }, } SCREAMING_SNAKE_CASE : Optional[int] = { '''google/pegasus-xsum''': 5_1_2, } class __lowerCamelCase ( __lowercase ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = PegasusTokenizer __UpperCamelCase = ['input_ids', 'attention_mask'] def __init__(self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<pad>" , lowerCamelCase="</s>" , lowerCamelCase="<unk>" , lowerCamelCase="<mask_2>" , lowerCamelCase="<mask_1>" , lowerCamelCase=None , lowerCamelCase=103 , **lowerCamelCase , ): '''simple docstring''' _lowerCAmelCase = offset if additional_special_tokens is not None: if not isinstance(lowerCamelCase , lowerCamelCase ): raise TypeError( f"""additional_special_tokens should be of type {type(lowerCamelCase )}, but is""" f""" {type(lowerCamelCase )}""" ) _lowerCAmelCase = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f"""<unk_{i}>""" for i in range(len(lowerCamelCase ) , self.offset - 1 ) ] if len(set(lowerCamelCase ) ) != len(lowerCamelCase ): raise ValueError( """Please make sure that the provided additional_special_tokens do not contain an incorrectly""" f""" shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.""" ) _lowerCAmelCase = additional_special_tokens_extended else: _lowerCAmelCase = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )] super().__init__( lowerCamelCase , tokenizer_file=lowerCamelCase , pad_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , mask_token=lowerCamelCase , mask_token_sent=lowerCamelCase , offset=lowerCamelCase , additional_special_tokens=lowerCamelCase , **lowerCamelCase , ) _lowerCAmelCase = vocab_file _lowerCAmelCase = False if not self.vocab_file else True def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( """There should be 3 special tokens: mask_token, pad_token, and eos_token +""" f""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" ) return [1 if x in all_special_ids else 0 for x in seq] def A__ (self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ): '''simple docstring''' if already_has_special_tokens: return self._special_token_mask(lowerCamelCase ) elif token_ids_a is None: return self._special_token_mask(lowerCamelCase ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def A__ (self , lowerCamelCase , lowerCamelCase=None ): '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def A__ (self , lowerCamelCase , lowerCamelCase = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(lowerCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return _lowerCAmelCase = os.path.join( lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ): copyfile(self.vocab_file , lowerCamelCase ) return (out_vocab_file,)
317
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : int = 1000000 ) -> int: """simple docstring""" _lowerCAmelCase = limit + 1 _lowerCAmelCase = [0] * limit for first_term in range(1 , snake_case_ ): for n in range(snake_case_ , snake_case_ , snake_case_ ): _lowerCAmelCase = first_term + n / first_term if common_difference % 4: # d must be divisble by 4 continue else: common_difference /= 4 if ( first_term > common_difference and first_term < 4 * common_difference ): # since x,y,z are positive integers frequency[n] += 1 # so z>0 and a>d ,also 4d<a _lowerCAmelCase = sum(1 for x in frequency[1:limit] if x == 10 ) return count if __name__ == "__main__": print(F'{solution() = }')
317
1
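An illustration of the offset bookkeeping in the Pegasus tokenizer above: when no extra tokens are passed, the constructor reserves <mask_1> plus <unk_2> ... <unk_{offset-1}>, i.e. offset - 1 additional special tokens in total. The values below simply restate the defaults visible in the signature.

offset = 103
additional_special_tokens = ["<mask_1>"]
additional_special_tokens += [f"<unk_{i}>" for i in range(2, offset)]
assert len(additional_special_tokens) == offset - 1  # 102 reserved slots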
"""simple docstring""" from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) # General docstring SCREAMING_SNAKE_CASE : int = '''RegNetConfig''' # Base docstring SCREAMING_SNAKE_CASE : str = '''facebook/regnet-y-040''' SCREAMING_SNAKE_CASE : List[Any] = [1, 1_0_8_8, 7, 7] # Image classification docstring SCREAMING_SNAKE_CASE : Any = '''facebook/regnet-y-040''' SCREAMING_SNAKE_CASE : Dict = '''tabby, tabby cat''' SCREAMING_SNAKE_CASE : Any = [ '''facebook/regnet-y-040''', # See all regnet models at https://huggingface.co/models?filter=regnet ] class __lowerCamelCase ( nn.Module ): def __init__(self , lowerCamelCase , lowerCamelCase , lowerCamelCase = 3 , lowerCamelCase = 1 , lowerCamelCase = 1 , lowerCamelCase = "relu" , ): '''simple docstring''' super().__init__() _lowerCAmelCase = nn.Convad( lowerCamelCase , lowerCamelCase , kernel_size=lowerCamelCase , stride=lowerCamelCase , padding=kernel_size // 2 , groups=lowerCamelCase , bias=lowerCamelCase , ) _lowerCAmelCase = nn.BatchNormad(lowerCamelCase ) _lowerCAmelCase = ACTaFN[activation] if activation is not None else nn.Identity() def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.convolution(lowerCamelCase ) _lowerCAmelCase = self.normalization(lowerCamelCase ) _lowerCAmelCase = self.activation(lowerCamelCase ) return hidden_state class __lowerCamelCase ( nn.Module ): def __init__(self , lowerCamelCase ): '''simple docstring''' super().__init__() _lowerCAmelCase = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) _lowerCAmelCase = config.num_channels def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( """Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" ) _lowerCAmelCase = self.embedder(lowerCamelCase ) return hidden_state class __lowerCamelCase ( nn.Module ): def __init__(self , lowerCamelCase , lowerCamelCase , lowerCamelCase = 2 ): '''simple docstring''' super().__init__() _lowerCAmelCase = nn.Convad(lowerCamelCase , lowerCamelCase , kernel_size=1 , stride=lowerCamelCase , bias=lowerCamelCase ) _lowerCAmelCase = nn.BatchNormad(lowerCamelCase ) def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.convolution(lowerCamelCase ) _lowerCAmelCase = self.normalization(lowerCamelCase ) return hidden_state class __lowerCamelCase ( nn.Module ): def __init__(self , lowerCamelCase , lowerCamelCase ): '''simple docstring''' super().__init__() _lowerCAmelCase = nn.AdaptiveAvgPoolad((1, 1) ) _lowerCAmelCase = nn.Sequential( nn.Convad(lowerCamelCase , lowerCamelCase , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowerCamelCase , lowerCamelCase , kernel_size=1 ) , nn.Sigmoid() , ) def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.pooler(lowerCamelCase ) _lowerCAmelCase = 
self.attention(lowerCamelCase ) _lowerCAmelCase = hidden_state * attention return hidden_state class __lowerCamelCase ( nn.Module ): def __init__(self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 1 ): '''simple docstring''' super().__init__() _lowerCAmelCase = in_channels != out_channels or stride != 1 _lowerCAmelCase = max(1 , out_channels // config.groups_width ) _lowerCAmelCase = ( RegNetShortCut(lowerCamelCase , lowerCamelCase , stride=lowerCamelCase ) if should_apply_shortcut else nn.Identity() ) _lowerCAmelCase = nn.Sequential( RegNetConvLayer(lowerCamelCase , lowerCamelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowerCamelCase , lowerCamelCase , stride=lowerCamelCase , groups=lowerCamelCase , activation=config.hidden_act ) , RegNetConvLayer(lowerCamelCase , lowerCamelCase , kernel_size=1 , activation=lowerCamelCase ) , ) _lowerCAmelCase = ACTaFN[config.hidden_act] def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = hidden_state _lowerCAmelCase = self.layer(lowerCamelCase ) _lowerCAmelCase = self.shortcut(lowerCamelCase ) hidden_state += residual _lowerCAmelCase = self.activation(lowerCamelCase ) return hidden_state class __lowerCamelCase ( nn.Module ): def __init__(self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 1 ): '''simple docstring''' super().__init__() _lowerCAmelCase = in_channels != out_channels or stride != 1 _lowerCAmelCase = max(1 , out_channels // config.groups_width ) _lowerCAmelCase = ( RegNetShortCut(lowerCamelCase , lowerCamelCase , stride=lowerCamelCase ) if should_apply_shortcut else nn.Identity() ) _lowerCAmelCase = nn.Sequential( RegNetConvLayer(lowerCamelCase , lowerCamelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowerCamelCase , lowerCamelCase , stride=lowerCamelCase , groups=lowerCamelCase , activation=config.hidden_act ) , RegNetSELayer(lowerCamelCase , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowerCamelCase , lowerCamelCase , kernel_size=1 , activation=lowerCamelCase ) , ) _lowerCAmelCase = ACTaFN[config.hidden_act] def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = hidden_state _lowerCAmelCase = self.layer(lowerCamelCase ) _lowerCAmelCase = self.shortcut(lowerCamelCase ) hidden_state += residual _lowerCAmelCase = self.activation(lowerCamelCase ) return hidden_state class __lowerCamelCase ( nn.Module ): def __init__(self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 2 , lowerCamelCase = 2 , ): '''simple docstring''' super().__init__() _lowerCAmelCase = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer _lowerCAmelCase = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( lowerCamelCase , lowerCamelCase , lowerCamelCase , stride=lowerCamelCase , ) , *[layer(lowerCamelCase , lowerCamelCase , lowerCamelCase ) for _ in range(depth - 1 )] , ) def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.layers(lowerCamelCase ) return hidden_state class __lowerCamelCase ( nn.Module ): def __init__(self , lowerCamelCase ): '''simple docstring''' super().__init__() _lowerCAmelCase = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( lowerCamelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) _lowerCAmelCase = 
zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(lowerCamelCase , config.depths[1:] ): self.stages.append(RegNetStage(lowerCamelCase , lowerCamelCase , lowerCamelCase , depth=lowerCamelCase ) ) def A__ (self , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = True ): '''simple docstring''' _lowerCAmelCase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _lowerCAmelCase = hidden_states + (hidden_state,) _lowerCAmelCase = stage_module(lowerCamelCase ) if output_hidden_states: _lowerCAmelCase = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase , hidden_states=lowerCamelCase ) class __lowerCamelCase ( __lowercase ): __UpperCamelCase = RegNetConfig __UpperCamelCase = 'regnet' __UpperCamelCase = 'pixel_values' __UpperCamelCase = True def A__ (self , lowerCamelCase ): '''simple docstring''' if isinstance(lowerCamelCase , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" ) elif isinstance(lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def A__ (self , lowerCamelCase , lowerCamelCase=False ): '''simple docstring''' if isinstance(lowerCamelCase , lowerCamelCase ): _lowerCAmelCase = value SCREAMING_SNAKE_CASE : int = R''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' SCREAMING_SNAKE_CASE : Any = R''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' 
, __lowercase , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class __lowerCamelCase ( __lowercase ): def __init__(self , lowerCamelCase ): '''simple docstring''' super().__init__(lowerCamelCase ) _lowerCAmelCase = config _lowerCAmelCase = RegNetEmbeddings(lowerCamelCase ) _lowerCAmelCase = RegNetEncoder(lowerCamelCase ) _lowerCAmelCase = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCamelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def A__ (self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None ): '''simple docstring''' _lowerCAmelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict _lowerCAmelCase = self.embedder(lowerCamelCase ) _lowerCAmelCase = self.encoder( lowerCamelCase , output_hidden_states=lowerCamelCase , return_dict=lowerCamelCase ) _lowerCAmelCase = encoder_outputs[0] _lowerCAmelCase = self.pooler(lowerCamelCase ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowerCamelCase , pooler_output=lowerCamelCase , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , __lowercase , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class __lowerCamelCase ( __lowercase ): def __init__(self , lowerCamelCase ): '''simple docstring''' super().__init__(lowerCamelCase ) _lowerCAmelCase = config.num_labels _lowerCAmelCase = RegNetModel(lowerCamelCase ) # classification head _lowerCAmelCase = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCamelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def A__ (self , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , ): '''simple docstring''' _lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict _lowerCAmelCase = self.regnet(lowerCamelCase , output_hidden_states=lowerCamelCase , return_dict=lowerCamelCase ) _lowerCAmelCase = outputs.pooler_output if return_dict else outputs[1] _lowerCAmelCase = self.classifier(lowerCamelCase ) _lowerCAmelCase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _lowerCAmelCase = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _lowerCAmelCase = """single_label_classification""" else: _lowerCAmelCase = """multi_label_classification""" if self.config.problem_type == "regression": _lowerCAmelCase = MSELoss() if self.num_labels == 1: _lowerCAmelCase = loss_fct(logits.squeeze() , labels.squeeze() ) else: _lowerCAmelCase = 
loss_fct(lowerCamelCase , lowerCamelCase ) elif self.config.problem_type == "single_label_classification": _lowerCAmelCase = CrossEntropyLoss() _lowerCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _lowerCAmelCase = BCEWithLogitsLoss() _lowerCAmelCase = loss_fct(lowerCamelCase , lowerCamelCase ) if not return_dict: _lowerCAmelCase = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowerCamelCase , logits=lowerCamelCase , hidden_states=outputs.hidden_states )
317
"""simple docstring""" from functools import reduce SCREAMING_SNAKE_CASE : int = ( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715693290963295227443043557''' '''66896648950445244523161731856403098711121722383113''' '''62229893423380308135336276614282806444486645238749''' '''30358907296290491560440772390713810515859307960866''' '''70172427121883998797908792274921901699720888093776''' '''65727333001053367881220235421809751254540594752243''' '''52584907711670556013604839586446706324415722155397''' '''53697817977846174064955149290862569321978468622482''' '''83972241375657056057490261407972968652414535100474''' '''82166370484403199890008895243450658541227588666881''' '''16427171479924442928230863465674813919123162824586''' '''17866458359124566529476545682848912883142607690042''' '''24219022671055626321111109370544217506941658960408''' '''07198403850962455444362981230987879927244284909188''' '''84580156166097919133875499200524063689912560717606''' '''05886116467109405077541002256983155200055935729725''' '''71636269561882670428252483600823257530420752963450''' ) def __UpperCAmelCase ( snake_case_ : str = N ) -> int: """simple docstring""" return max( # mypy cannot properly interpret reduce int(reduce(lambda snake_case_ , snake_case_ : str(int(snake_case_ ) * int(snake_case_ ) ) , n[i : i + 13] ) ) for i in range(len(snake_case_ ) - 12 ) ) if __name__ == "__main__": print(F'{solution() = }')
317
1
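A quick sanity check of the sliding-window product logic above, using a short made-up digit string and window width 3 in place of the 1000-digit series and width 13; the best window of "1027839564" is "956" with product 270.

from functools import reduce

def best_window_product(digits: str, width: int) -> int:
    # max over every contiguous window of the product of its digits
    return max(
        reduce(lambda a, b: a * b, (int(d) for d in digits[i : i + width]))
        for i in range(len(digits) - width + 1)
    )

assert best_window_product("1027839564", 3) == 270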
"""simple docstring""" import requests SCREAMING_SNAKE_CASE : int = '''https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey=''' def __UpperCAmelCase ( snake_case_ : str ) -> None: """simple docstring""" _lowerCAmelCase = requests.get(_NEWS_API + bbc_news_api_key ).json() # each article in the list is a dict for i, article in enumerate(bbc_news_page["""articles"""] , 1 ): print(F"""{i}.) {article["title"]}""" ) if __name__ == "__main__": fetch_bbc_news(bbc_news_api_key='''<Your BBC News API key goes here>''')
317
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : int = 600851475143 ) -> int: """simple docstring""" try: _lowerCAmelCase = int(snake_case_ ) except (TypeError, ValueError): raise TypeError("""Parameter n must be int or castable to int.""" ) if n <= 0: raise ValueError("""Parameter n must be greater than or equal to one.""" ) _lowerCAmelCase = 1 _lowerCAmelCase = 2 while i * i <= n: while n % i == 0: _lowerCAmelCase = i n //= i i += 1 if n > 1: _lowerCAmelCase = n return int(snake_case_ ) if __name__ == "__main__": print(F'{solution() = }')
317
1
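Spot checks of the trial-division idea above, written out under an illustrative name: divide out each factor i while i * i <= n, and any remainder greater than 1 is itself the largest prime factor.

def largest_prime_factor_check(n: int) -> int:
    i, largest = 2, 1
    while i * i <= n:
        while n % i == 0:
            largest = i
            n //= i
        i += 1
    # whatever survives trial division up to sqrt(n) is prime and largest
    return n if n > 1 else largest

assert largest_prime_factor_check(13195) == 29           # 13195 = 5 * 7 * 13 * 29
assert largest_prime_factor_check(600851475143) == 6857  # the Project Euler 3 answer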
"""simple docstring""" import os SCREAMING_SNAKE_CASE : List[Any] = {'''I''': 1, '''V''': 5, '''X''': 1_0, '''L''': 5_0, '''C''': 1_0_0, '''D''': 5_0_0, '''M''': 1_0_0_0} def __UpperCAmelCase ( snake_case_ : str ) -> int: """simple docstring""" _lowerCAmelCase = 0 _lowerCAmelCase = 0 while index < len(snake_case_ ) - 1: _lowerCAmelCase = SYMBOLS[numerals[index]] _lowerCAmelCase = SYMBOLS[numerals[index + 1]] if current_value < next_value: total_value -= current_value else: total_value += current_value index += 1 total_value += SYMBOLS[numerals[index]] return total_value def __UpperCAmelCase ( snake_case_ : int ) -> str: """simple docstring""" _lowerCAmelCase = """""" _lowerCAmelCase = num // 1000 numerals += m_count * "M" num %= 1000 _lowerCAmelCase = num // 100 if c_count == 9: numerals += "CM" c_count -= 9 elif c_count == 4: numerals += "CD" c_count -= 4 if c_count >= 5: numerals += "D" c_count -= 5 numerals += c_count * "C" num %= 100 _lowerCAmelCase = num // 10 if x_count == 9: numerals += "XC" x_count -= 9 elif x_count == 4: numerals += "XL" x_count -= 4 if x_count >= 5: numerals += "L" x_count -= 5 numerals += x_count * "X" num %= 10 if num == 9: numerals += "IX" num -= 9 elif num == 4: numerals += "IV" num -= 4 if num >= 5: numerals += "V" num -= 5 numerals += num * "I" return numerals def __UpperCAmelCase ( snake_case_ : str = "/p089_roman.txt" ) -> int: """simple docstring""" _lowerCAmelCase = 0 with open(os.path.dirname(snake_case_ ) + roman_numerals_filename ) as filea: _lowerCAmelCase = filea.readlines() for line in lines: _lowerCAmelCase = line.strip() _lowerCAmelCase = parse_roman_numerals(snake_case_ ) _lowerCAmelCase = generate_roman_numerals(snake_case_ ) savings += len(snake_case_ ) - len(snake_case_ ) return savings if __name__ == "__main__": print(F'{solution() = }')
317
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__) @dataclass class __lowerCamelCase : __UpperCamelCase = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) __UpperCamelCase = field( default=__lowercase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) __UpperCamelCase = field( default=__lowercase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) __UpperCamelCase = field( default=__lowercase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) __UpperCamelCase = field(default=__lowercase , metadata={'help': 'Whether tp freeze the encoder.'} ) __UpperCamelCase = field(default=__lowercase , metadata={'help': 'Whether to freeze the embeddings.'} ) @dataclass class __lowerCamelCase : __UpperCamelCase = field( metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} ) __UpperCamelCase = field( default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , ) __UpperCamelCase = field( default=1_024 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __UpperCamelCase = field( default=128 , metadata={ 'help': ( 'The maximum total sequence length for target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __UpperCamelCase = field( default=142 , metadata={ 'help': ( 'The maximum total sequence length for validation target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded. ' 'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used ' 'during ``evaluate`` and ``predict``.' ) } , ) __UpperCamelCase = field( default=142 , metadata={ 'help': ( 'The maximum total sequence length for test target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __UpperCamelCase = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} ) __UpperCamelCase = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} ) __UpperCamelCase = field(default=-1 , metadata={'help': '# test examples. 
-1 means use all.'} ) __UpperCamelCase = field(default=__lowercase , metadata={'help': 'Source language id for translation.'} ) __UpperCamelCase = field(default=__lowercase , metadata={'help': 'Target language id for translation.'} ) __UpperCamelCase = field(default=__lowercase , metadata={'help': '# num_beams to use for evaluation.'} ) __UpperCamelCase = field( default=__lowercase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , ) def __UpperCAmelCase ( snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Union[str, Any] ) -> Tuple: """simple docstring""" logger.info(F"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(F""" {key} = {metrics[key]}""" ) save_json(snake_case_ , os.path.join(snake_case_ , F"""{split}_results.json""" ) ) def __UpperCAmelCase ( ) -> Union[str, Any]: """simple docstring""" _lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_args_into_dataclasses() check_output_dir(snake_case_ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("""Training/evaluation parameters %s""" , snake_case_ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_lowerCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _lowerCAmelCase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""") for p in extra_model_params: if getattr(snake_case_ , snake_case_ , snake_case_ ): assert hasattr(snake_case_ , snake_case_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) ) _lowerCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(snake_case_ , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: _lowerCAmelCase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(snake_case_ , snake_case_ ): _lowerCAmelCase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: _lowerCAmelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(snake_case_ ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) _lowerCAmelCase = SeqaSeqDataset # Get datasets _lowerCAmelCase = ( dataset_class( snake_case_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_train else None ) _lowerCAmelCase = ( dataset_class( snake_case_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) _lowerCAmelCase = ( dataset_class( snake_case_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_predict else None ) # Initialize our Trainer _lowerCAmelCase = ( build_compute_metrics_fn(data_args.task , snake_case_ ) if training_args.predict_with_generate else None ) _lowerCAmelCase = SeqaSeqTrainer( model=snake_case_ , args=snake_case_ , data_args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , data_collator=SeqaSeqDataCollator( snake_case_ , snake_case_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case_ , tokenizer=snake_case_ , ) _lowerCAmelCase = {} # Training if training_args.do_train: logger.info("""*** Train ***""" ) _lowerCAmelCase = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) _lowerCAmelCase = train_result.metrics _lowerCAmelCase = data_args.n_train 
trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("""train""" , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) _lowerCAmelCase = trainer.evaluate(metric_key_prefix="""val""" ) _lowerCAmelCase = data_args.n_val _lowerCAmelCase = round(metrics["""val_loss"""] , 4 ) if trainer.is_world_process_zero(): handle_metrics("""val""" , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) if training_args.do_predict: logger.info("""*** Predict ***""" ) _lowerCAmelCase = trainer.predict(test_dataset=snake_case_ , metric_key_prefix="""test""" ) _lowerCAmelCase = test_output.metrics _lowerCAmelCase = data_args.n_test if trainer.is_world_process_zero(): _lowerCAmelCase = round(metrics["""test_loss"""] , 4 ) handle_metrics("""test""" , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) if training_args.predict_with_generate: _lowerCAmelCase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ ) _lowerCAmelCase = lmap(str.strip , snake_case_ ) write_txt_file(snake_case_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) ) if trainer.is_world_process_zero(): save_json(snake_case_ , os.path.join(training_args.output_dir , """all_results.json""" ) ) return all_metrics def __UpperCAmelCase ( snake_case_ : Any ) -> Dict: """simple docstring""" main() if __name__ == "__main__": main()
317
1
"""simple docstring""" import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( """files""" , [ ["""full:README.md""", """dataset_infos.json"""], ["""empty:README.md""", """dataset_infos.json"""], ["""dataset_infos.json"""], ["""full:README.md"""], ] , ) def __UpperCAmelCase ( snake_case_ : Any , snake_case_ : Optional[Any] ) -> List[str]: """simple docstring""" _lowerCAmelCase = tmp_path_factory.mktemp("""dset_infos_dir""" ) if "full:README.md" in files: with open(dataset_infos_dir / """README.md""" , """w""" ) as f: f.write("""---\ndataset_info:\n dataset_size: 42\n---""" ) if "empty:README.md" in files: with open(dataset_infos_dir / """README.md""" , """w""" ) as f: f.write("""""" ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f: f.write("""{\"default\": {\"dataset_size\": 42}}""" ) _lowerCAmelCase = DatasetInfosDict.from_directory(snake_case_ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( """dataset_info""" , [ DatasetInfo(), DatasetInfo( description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ), ] , ) def __UpperCAmelCase ( snake_case_ : Optional[int] , snake_case_ : DatasetInfo ) -> Union[str, Any]: """simple docstring""" _lowerCAmelCase = str(snake_case_ ) dataset_info.write_to_directory(snake_case_ ) _lowerCAmelCase = DatasetInfo.from_directory(snake_case_ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(snake_case_ , """dataset_info.json""" ) ) def __UpperCAmelCase ( ) -> Union[str, Any]: """simple docstring""" _lowerCAmelCase = DatasetInfo( description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) _lowerCAmelCase = dataset_info._to_yaml_dict() assert sorted(snake_case_ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) _lowerCAmelCase = yaml.safe_dump(snake_case_ ) _lowerCAmelCase = yaml.safe_load(snake_case_ ) assert dataset_info_yaml_dict == reloaded def __UpperCAmelCase ( ) -> Dict: """simple docstring""" _lowerCAmelCase = DatasetInfo() _lowerCAmelCase = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( """dataset_infos_dict""" , [ DatasetInfosDict(), DatasetInfosDict({"""default""": DatasetInfo()} ), DatasetInfosDict({"""my_config_name""": DatasetInfo()} ), DatasetInfosDict( { """default""": DatasetInfo( description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ) } ), DatasetInfosDict( { """v1""": DatasetInfo(dataset_size=42 ), """v2""": 
DatasetInfo(dataset_size=1337 ), } ), ] , ) def __UpperCAmelCase ( snake_case_ : Dict , snake_case_ : DatasetInfosDict ) -> List[Any]: """simple docstring""" _lowerCAmelCase = str(snake_case_ ) dataset_infos_dict.write_to_directory(snake_case_ ) _lowerCAmelCase = DatasetInfosDict.from_directory(snake_case_ ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): _lowerCAmelCase = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml _lowerCAmelCase = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(snake_case_ , """README.md""" ) )
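# Minimal sketch of the round trip the tests above exercise (directory name is hypothetical):
#   infos = DatasetInfosDict({"default": DatasetInfo(dataset_size=42)})
#   infos.write_to_directory("dset_dir")
#   assert DatasetInfosDict.from_directory("dset_dir")["default"].dataset_size == 42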
317
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE : List[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Union[str, Any] = [ '''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FocalNetForImageClassification''', '''FocalNetForMaskedImageModeling''', '''FocalNetBackbone''', '''FocalNetModel''', '''FocalNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
317
1
"""simple docstring""" import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class __lowerCamelCase ( __lowercase ): def __init__(self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=64 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=3 , lowerCamelCase=4 , lowerCamelCase=None , lowerCamelCase=2 , lowerCamelCase=2 , lowerCamelCase=2 , lowerCamelCase=2 , lowerCamelCase=4 , lowerCamelCase=1 , ): '''simple docstring''' _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = seq_length _lowerCAmelCase = is_training _lowerCAmelCase = use_input_mask _lowerCAmelCase = use_token_type_ids _lowerCAmelCase = use_labels _lowerCAmelCase = vocab_size _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = type_vocab_size _lowerCAmelCase = type_sequence_label_size _lowerCAmelCase = initializer_range _lowerCAmelCase = num_labels _lowerCAmelCase = num_choices _lowerCAmelCase = scope _lowerCAmelCase = q_groups _lowerCAmelCase = k_groups _lowerCAmelCase = v_groups _lowerCAmelCase = post_attention_groups _lowerCAmelCase = intermediate_groups _lowerCAmelCase = output_groups def A__ (self ): '''simple docstring''' _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCAmelCase = None if self.use_input_mask: _lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None if self.use_labels: _lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _lowerCAmelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def A__ (self ): '''simple docstring''' return SqueezeBertConfig( embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , 
v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = SqueezeBertModel(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() _lowerCAmelCase = model(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = model(lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = SqueezeBertForMaskedLM(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() _lowerCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = SqueezeBertForQuestionAnswering(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() _lowerCAmelCase = model( lowerCamelCase , attention_mask=lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.num_labels _lowerCAmelCase = SqueezeBertForSequenceClassification(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() _lowerCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.num_labels _lowerCAmelCase = SqueezeBertForTokenClassification(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() _lowerCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.num_choices _lowerCAmelCase = SqueezeBertForMultipleChoice(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() _lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase = model( lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.prepare_config_and_inputs() ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = config_and_inputs _lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return 
config, inputs_dict @require_torch class __lowerCamelCase ( __lowercase , __lowercase , unittest.TestCase ): __UpperCamelCase = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) __UpperCamelCase = ( { 'feature-extraction': SqueezeBertModel, 'fill-mask': SqueezeBertForMaskedLM, 'question-answering': SqueezeBertForQuestionAnswering, 'text-classification': SqueezeBertForSequenceClassification, 'token-classification': SqueezeBertForTokenClassification, 'zero-shot': SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = True __UpperCamelCase = False def A__ (self ): '''simple docstring''' _lowerCAmelCase = SqueezeBertModelTester(self ) _lowerCAmelCase = ConfigTester(self , config_class=lowerCamelCase , dim=37 ) def A__ (self ): '''simple docstring''' self.config_tester.run_common_tests() def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowerCamelCase ) @slow def A__ (self ): '''simple docstring''' for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase = SqueezeBertModel.from_pretrained(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) @require_sentencepiece @require_tokenizers @require_torch class __lowerCamelCase ( unittest.TestCase ): @slow def A__ (self ): '''simple docstring''' _lowerCAmelCase = SqueezeBertForSequenceClassification.from_pretrained("""squeezebert/squeezebert-mnli""" ) _lowerCAmelCase = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]] ) _lowerCAmelCase = model(lowerCamelCase )[0] _lowerCAmelCase = torch.Size((1, 3) ) self.assertEqual(output.shape , lowerCamelCase ) _lowerCAmelCase = torch.tensor([[0.6401, -0.0349, -0.6041]] ) self.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1e-4 ) )
317
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __lowerCamelCase ( unittest.TestCase ): def __init__(self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=18 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=None , ): '''simple docstring''' _lowerCAmelCase = size if size is not None else {"""shortest_edge""": 20} _lowerCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = num_channels _lowerCAmelCase = image_size _lowerCAmelCase = min_resolution _lowerCAmelCase = max_resolution _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = do_center_crop _lowerCAmelCase = crop_size def A__ (self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class __lowerCamelCase ( __lowercase , unittest.TestCase ): __UpperCamelCase = MobileNetVaImageProcessor if is_vision_available() else None def A__ (self ): '''simple docstring''' _lowerCAmelCase = MobileNetVaImageProcessingTester(self ) @property def A__ (self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase , """do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase , """size""" ) ) self.assertTrue(hasattr(lowerCamelCase , """do_center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase , """crop_size""" ) ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 20} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def A__ (self ): '''simple docstring''' pass def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , Image.Image ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, 
self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , np.ndarray ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , torch.Tensor ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
317
1
"""simple docstring""" from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
317
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : list ) -> list: """simple docstring""" for i in range(len(snake_case_ ) - 1 , 0 , -1 ): _lowerCAmelCase = False for j in range(snake_case_ , 0 , -1 ): if unsorted[j] < unsorted[j - 1]: _lowerCAmelCase , _lowerCAmelCase = unsorted[j - 1], unsorted[j] _lowerCAmelCase = True for j in range(snake_case_ ): if unsorted[j] > unsorted[j + 1]: _lowerCAmelCase , _lowerCAmelCase = unsorted[j + 1], unsorted[j] _lowerCAmelCase = True if not swapped: break return unsorted if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE : List[Any] = input('''Enter numbers separated by a comma:\n''').strip() SCREAMING_SNAKE_CASE : List[str] = [int(item) for item in user_input.split(''',''')] print(F'{cocktail_shaker_sort(unsorted) = }')
317
1
"""simple docstring""" import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py SCREAMING_SNAKE_CASE : int = '''src/transformers''' # This is to make sure the transformers module imported is the one in the repo. SCREAMING_SNAKE_CASE : List[Any] = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. SCREAMING_SNAKE_CASE : Union[str, Any] = re.compile(R'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') SCREAMING_SNAKE_CASE : List[str] = re.compile(R'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. SCREAMING_SNAKE_CASE : Any = re.compile(R'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') # Fill this with tuples (pipeline_tag, model_mapping, auto_model) SCREAMING_SNAKE_CASE : Union[str, Any] = [ ('''pretraining''', '''MODEL_FOR_PRETRAINING_MAPPING_NAMES''', '''AutoModelForPreTraining'''), ('''feature-extraction''', '''MODEL_MAPPING_NAMES''', '''AutoModel'''), ('''audio-classification''', '''MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioClassification'''), ('''text-generation''', '''MODEL_FOR_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForCausalLM'''), ('''automatic-speech-recognition''', '''MODEL_FOR_CTC_MAPPING_NAMES''', '''AutoModelForCTC'''), ('''image-classification''', '''MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForImageClassification'''), ('''image-segmentation''', '''MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES''', '''AutoModelForImageSegmentation'''), ('''fill-mask''', '''MODEL_FOR_MASKED_LM_MAPPING_NAMES''', '''AutoModelForMaskedLM'''), ('''object-detection''', '''MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForObjectDetection'''), ( '''zero-shot-object-detection''', '''MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForZeroShotObjectDetection''', ), ('''question-answering''', '''MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForQuestionAnswering'''), ('''text2text-generation''', '''MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForSeq2SeqLM'''), ('''text-classification''', '''MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForSequenceClassification'''), ('''automatic-speech-recognition''', '''MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES''', '''AutoModelForSpeechSeq2Seq'''), ( '''table-question-answering''', '''MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForTableQuestionAnswering''', ), ('''token-classification''', '''MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForTokenClassification'''), ('''multiple-choice''', '''MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES''', '''AutoModelForMultipleChoice'''), ( '''next-sentence-prediction''', '''MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES''', '''AutoModelForNextSentencePrediction''', ), ( '''audio-frame-classification''', '''MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioFrameClassification''', ), ('''audio-xvector''', '''MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES''', '''AutoModelForAudioXVector'''), ( '''document-question-answering''', 
'''MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForDocumentQuestionAnswering''', ), ( '''visual-question-answering''', '''MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForVisualQuestionAnswering''', ), ('''image-to-text''', '''MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES''', '''AutoModelForVision2Seq'''), ( '''zero-shot-image-classification''', '''MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForZeroShotImageClassification''', ), ('''depth-estimation''', '''MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES''', '''AutoModelForDepthEstimation'''), ('''video-classification''', '''MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForVideoClassification'''), ('''mask-generation''', '''MODEL_FOR_MASK_GENERATION_MAPPING_NAMES''', '''AutoModelForMaskGeneration'''), ] def __UpperCAmelCase ( snake_case_ : Any ) -> str: """simple docstring""" _lowerCAmelCase = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , snake_case_ ) return [m.group(0 ) for m in matches] def __UpperCAmelCase ( ) -> List[str]: """simple docstring""" _lowerCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES _lowerCAmelCase = { config.replace("""Config""" , """""" ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. _lowerCAmelCase = collections.defaultdict(snake_case_ ) _lowerCAmelCase = collections.defaultdict(snake_case_ ) _lowerCAmelCase = collections.defaultdict(snake_case_ ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(snake_case_ ): _lowerCAmelCase = None if _re_tf_models.match(snake_case_ ) is not None: _lowerCAmelCase = tf_models _lowerCAmelCase = _re_tf_models.match(snake_case_ ).groups()[0] elif _re_flax_models.match(snake_case_ ) is not None: _lowerCAmelCase = flax_models _lowerCAmelCase = _re_flax_models.match(snake_case_ ).groups()[0] elif _re_pt_models.match(snake_case_ ) is not None: _lowerCAmelCase = pt_models _lowerCAmelCase = _re_pt_models.match(snake_case_ ).groups()[0] if lookup_dict is not None: while len(snake_case_ ) > 0: if attr_name in model_prefix_to_model_type: _lowerCAmelCase = True break # Try again after removing the last word in the name _lowerCAmelCase = """""".join(camel_case_split(snake_case_ )[:-1] ) _lowerCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) _lowerCAmelCase = list(snake_case_ ) all_models.sort() _lowerCAmelCase = {"""model_type""": all_models} _lowerCAmelCase = [pt_models[t] for t in all_models] _lowerCAmelCase = [tf_models[t] for t in all_models] _lowerCAmelCase = [flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure _lowerCAmelCase = {} for t in all_models: if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: _lowerCAmelCase = """AutoProcessor""" elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: _lowerCAmelCase = """AutoTokenizer""" elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: _lowerCAmelCase = """AutoFeatureExtractor""" else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. 
_lowerCAmelCase = """AutoTokenizer""" _lowerCAmelCase = [processors[t] for t in all_models] return pd.DataFrame(snake_case_ ) def __UpperCAmelCase ( snake_case_ : Optional[Any] ) -> Dict: """simple docstring""" _lowerCAmelCase = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: _lowerCAmelCase = [model_mapping, F"""TF_{model_mapping}""", F"""FLAX_{model_mapping}"""] _lowerCAmelCase = [auto_class, F"""TF_{auto_class}""", F"""Flax_{auto_class}"""] # Loop through all three frameworks for module, cls, mapping in zip(snake_case_ , snake_case_ , snake_case_ ): # The type of pipeline may not exist in this framework if not hasattr(snake_case_ , snake_case_ ): continue # First extract all model_names _lowerCAmelCase = [] for name in getattr(snake_case_ , snake_case_ ).values(): if isinstance(snake_case_ , snake_case_ ): model_names.append(snake_case_ ) else: model_names.extend(list(snake_case_ ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def __UpperCAmelCase ( snake_case_ : int , snake_case_ : Union[str, Any] ) -> List[str]: """simple docstring""" _lowerCAmelCase = get_frameworks_table() _lowerCAmelCase = Dataset.from_pandas(snake_case_ ) _lowerCAmelCase = hf_hub_download( """huggingface/transformers-metadata""" , """pipeline_tags.json""" , repo_type="""dataset""" , token=snake_case_ ) _lowerCAmelCase = Dataset.from_json(snake_case_ ) _lowerCAmelCase = { tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""]) for i in range(len(snake_case_ ) ) } _lowerCAmelCase = update_pipeline_and_auto_class_table(snake_case_ ) # Sort the model classes to avoid some nondeterministic updates to create false update commits. 
_lowerCAmelCase = sorted(table.keys() ) _lowerCAmelCase = pd.DataFrame( { """model_class""": model_classes, """pipeline_tag""": [table[m][0] for m in model_classes], """auto_class""": [table[m][1] for m in model_classes], } ) _lowerCAmelCase = Dataset.from_pandas(snake_case_ ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(snake_case_ , """frameworks.json""" ) ) tags_dataset.to_json(os.path.join(snake_case_ , """pipeline_tags.json""" ) ) if commit_sha is not None: _lowerCAmelCase = ( F"""Update with commit {commit_sha}\n\nSee: """ F"""https://github.com/huggingface/transformers/commit/{commit_sha}""" ) else: _lowerCAmelCase = """Update""" upload_folder( repo_id="""huggingface/transformers-metadata""" , folder_path=snake_case_ , repo_type="""dataset""" , token=snake_case_ , commit_message=snake_case_ , ) def __UpperCAmelCase ( ) -> Union[str, Any]: """simple docstring""" _lowerCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} _lowerCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS _lowerCAmelCase = [] for key in pipeline_tasks: if key not in in_table: _lowerCAmelCase = pipeline_tasks[key]["""pt"""] if isinstance(snake_case_ , (list, tuple) ): _lowerCAmelCase = model[0] _lowerCAmelCase = model.__name__ if model not in in_table.values(): missing.append(snake_case_ ) if len(snake_case_ ) > 0: _lowerCAmelCase = """, """.join(snake_case_ ) raise ValueError( """The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """ F"""`utils/update_metadata.py`: {msg}. Please add them!""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser() parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''') parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''') parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''') SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
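# Example for the camel-case splitting helper defined earlier in this file (the first
# function, used when building the frameworks table); behaviour traced by hand from its regex:
#   "TFBertForMaskedLM" -> ["TF", "Bert", "For", "Masked", "LM"]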
317
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) def __UpperCAmelCase ( snake_case_ : bool , snake_case_ : bool ) -> Tuple: """simple docstring""" def run_func(snake_case_ : Union[str, Any] ): @wraps(snake_case_ ) def run_in_eager_mode(*snake_case_ : Optional[int] , **snake_case_ : Union[str, Any] ): return func(*snake_case_ , **snake_case_ ) @wraps(snake_case_ ) @tf.function(experimental_compile=snake_case_ ) def run_in_graph_mode(*snake_case_ : Dict , **snake_case_ : Union[str, Any] ): return func(*snake_case_ , **snake_case_ ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> ["tf.Tensor"]: """simple docstring""" _lowerCAmelCase = random.Random() _lowerCAmelCase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(snake_case_ , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class __lowerCamelCase ( __lowercase ): __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = "TensorFlow" @property def A__ (self ): '''simple docstring''' return tf.__version__ def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_speed(_inference ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_speed(_train ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase ) _lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_memory(_inference ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase ) _lowerCAmelCase = self.args.strategy if strategy is None: raise 
ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_memory(_train ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _lowerCAmelCase = ( hasattr(lowerCamelCase , """architectures""" ) and isinstance(config.architectures , lowerCamelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = model_cls(lowerCamelCase ) except ImportError: raise ImportError( f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to""" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _lowerCAmelCase = TF_MODEL_MAPPING[config.__class__](lowerCamelCase ) # encoder-decoder has vocab size saved differently _lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size _lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(lowerCamelCase , decoder_input_ids=lowerCamelCase , training=lowerCamelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(lowerCamelCase , training=lowerCamelCase ) _lowerCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _lowerCAmelCase = ( hasattr(lowerCamelCase , """architectures""" ) and isinstance(config.architectures , lowerCamelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = model_cls(lowerCamelCase ) except ImportError: raise ImportError( f"""{model_class} does not exist. 
If you just want to test the pretrained model, you might want to""" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _lowerCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowerCamelCase ) # encoder-decoder has vocab size saved differently _lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size _lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): _lowerCAmelCase = model(lowerCamelCase , decoder_input_ids=lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0] _lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): _lowerCAmelCase = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0] _lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables ) return gradients _lowerCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def A__ (self , lowerCamelCase ): '''simple docstring''' with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(lowerCamelCase , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _lowerCAmelCase = timeit.repeat( lowerCamelCase , repeat=self.args.repeat , number=10 , ) return min(lowerCamelCase ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f"""Doesn't fit on GPU. {e}""" ) def A__ (self , lowerCamelCase ): '''simple docstring''' logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _lowerCAmelCase = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _lowerCAmelCase = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _lowerCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _lowerCAmelCase = nvml.nvmlDeviceGetMemoryInfo(lowerCamelCase ) _lowerCAmelCase = meminfo.used _lowerCAmelCase = Memory(lowerCamelCase ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _lowerCAmelCase = None else: _lowerCAmelCase = measure_peak_memory_cpu(lowerCamelCase ) _lowerCAmelCase = Memory(lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else memory_bytes if self.args.trace_memory_line_by_line: _lowerCAmelCase = stop_memory_tracing(lowerCamelCase ) if memory is None: _lowerCAmelCase = summary.total else: _lowerCAmelCase = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f"""Doesn't fit on GPU. {e}""" ) return "N/A", None
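# Sketch of the eager/graph switch applied above via @run_with_tf_optimizations
# (argument values here are illustrative):
#   @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#   def forward():
#       return model(input_ids, training=False)
# With do_eager_mode=False the call is wrapped in tf.function(experimental_compile=True),
# i.e. it runs as a compiled graph with XLA enabled.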
317
1
"""simple docstring""" from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __lowerCamelCase ( __lowercase ): __UpperCamelCase = ['image_processor', 'tokenizer'] __UpperCamelCase = 'BlipImageProcessor' __UpperCamelCase = ('BertTokenizer', 'BertTokenizerFast') def __init__(self , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = False super().__init__(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = self.image_processor def __call__(self , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = 0 , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = True , lowerCamelCase = None , **lowerCamelCase , ): '''simple docstring''' if images is None and text is None: raise ValueError("""You have to specify either images or text.""" ) # Get only text if images is None: _lowerCAmelCase = self.tokenizer _lowerCAmelCase = self.tokenizer( text=lowerCamelCase , add_special_tokens=lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , stride=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , return_overflowing_tokens=lowerCamelCase , return_special_tokens_mask=lowerCamelCase , return_offsets_mapping=lowerCamelCase , return_token_type_ids=lowerCamelCase , return_length=lowerCamelCase , verbose=lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase , ) return text_encoding # add pixel_values _lowerCAmelCase = self.image_processor(lowerCamelCase , return_tensors=lowerCamelCase ) if text is not None: _lowerCAmelCase = self.tokenizer( text=lowerCamelCase , add_special_tokens=lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , stride=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , return_overflowing_tokens=lowerCamelCase , return_special_tokens_mask=lowerCamelCase , return_offsets_mapping=lowerCamelCase , return_token_type_ids=lowerCamelCase , return_length=lowerCamelCase , verbose=lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase , ) else: _lowerCAmelCase = None if text_encoding is not None: encoding_image_processor.update(lowerCamelCase ) return encoding_image_processor def A__ (self , *lowerCamelCase , **lowerCamelCase ): '''simple docstring''' return self.tokenizer.batch_decode(*lowerCamelCase , **lowerCamelCase ) def A__ (self , *lowerCamelCase , **lowerCamelCase ): '''simple docstring''' return self.tokenizer.decode(*lowerCamelCase , **lowerCamelCase ) @property def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.tokenizer.model_input_names _lowerCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
317
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Any = { '''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''', } class __lowerCamelCase ( __lowercase ): __UpperCamelCase = 'transfo-xl' __UpperCamelCase = ['mems'] __UpperCamelCase = { 'n_token': 'vocab_size', 'hidden_size': 'd_model', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__(self , lowerCamelCase=267_735 , lowerCamelCase=[20_000, 40_000, 200_000] , lowerCamelCase=1_024 , lowerCamelCase=1_024 , lowerCamelCase=16 , lowerCamelCase=64 , lowerCamelCase=4_096 , lowerCamelCase=4 , lowerCamelCase=False , lowerCamelCase=18 , lowerCamelCase=1_600 , lowerCamelCase=1_000 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=0 , lowerCamelCase=-1 , lowerCamelCase=True , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase="normal" , lowerCamelCase=0.01 , lowerCamelCase=0.01 , lowerCamelCase=0.02 , lowerCamelCase=1e-5 , lowerCamelCase=0 , **lowerCamelCase , ): '''simple docstring''' _lowerCAmelCase = vocab_size _lowerCAmelCase = [] self.cutoffs.extend(lowerCamelCase ) if proj_share_all_but_first: _lowerCAmelCase = [False] + [True] * len(self.cutoffs ) else: _lowerCAmelCase = [False] + [False] * len(self.cutoffs ) _lowerCAmelCase = d_model _lowerCAmelCase = d_embed _lowerCAmelCase = d_head _lowerCAmelCase = d_inner _lowerCAmelCase = div_val _lowerCAmelCase = pre_lnorm _lowerCAmelCase = n_layer _lowerCAmelCase = n_head _lowerCAmelCase = mem_len _lowerCAmelCase = same_length _lowerCAmelCase = attn_type _lowerCAmelCase = clamp_len _lowerCAmelCase = sample_softmax _lowerCAmelCase = adaptive _lowerCAmelCase = dropout _lowerCAmelCase = dropatt _lowerCAmelCase = untie_r _lowerCAmelCase = init _lowerCAmelCase = init_range _lowerCAmelCase = proj_init_std _lowerCAmelCase = init_std _lowerCAmelCase = layer_norm_epsilon super().__init__(eos_token_id=lowerCamelCase , **lowerCamelCase ) @property def A__ (self ): '''simple docstring''' logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) return -1 @max_position_embeddings.setter def A__ (self , lowerCamelCase ): '''simple docstring''' raise NotImplementedError( f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
317
1
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE : List[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Union[str, Any] = [ '''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FocalNetForImageClassification''', '''FocalNetForMaskedImageModeling''', '''FocalNetBackbone''', '''FocalNetModel''', '''FocalNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
317
"""simple docstring""" import math def __UpperCAmelCase ( snake_case_ : int ) -> list[int]: """simple docstring""" _lowerCAmelCase = [] _lowerCAmelCase = 2 _lowerCAmelCase = int(math.sqrt(snake_case_ ) ) # Size of every segment _lowerCAmelCase = [True] * (end + 1) _lowerCAmelCase = [] while start <= end: if temp[start] is True: in_prime.append(snake_case_ ) for i in range(start * start , end + 1 , snake_case_ ): _lowerCAmelCase = False start += 1 prime += in_prime _lowerCAmelCase = end + 1 _lowerCAmelCase = min(2 * end , snake_case_ ) while low <= n: _lowerCAmelCase = [True] * (high - low + 1) for each in in_prime: _lowerCAmelCase = math.floor(low / each ) * each if t < low: t += each for j in range(snake_case_ , high + 1 , snake_case_ ): _lowerCAmelCase = False for j in range(len(snake_case_ ) ): if temp[j] is True: prime.append(j + low ) _lowerCAmelCase = high + 1 _lowerCAmelCase = min(high + end , snake_case_ ) return prime print(sieve(1_0**6))
317
1
"""simple docstring""" from ...processing_utils import ProcessorMixin class __lowerCamelCase ( __lowercase ): __UpperCamelCase = 'SpeechT5FeatureExtractor' __UpperCamelCase = 'SpeechT5Tokenizer' def __init__(self , lowerCamelCase , lowerCamelCase ): '''simple docstring''' super().__init__(lowerCamelCase , lowerCamelCase ) def __call__(self , *lowerCamelCase , **lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = kwargs.pop("""audio""" , lowerCamelCase ) _lowerCAmelCase = kwargs.pop("""text""" , lowerCamelCase ) _lowerCAmelCase = kwargs.pop("""text_target""" , lowerCamelCase ) _lowerCAmelCase = kwargs.pop("""audio_target""" , lowerCamelCase ) _lowerCAmelCase = kwargs.pop("""sampling_rate""" , lowerCamelCase ) if audio is not None and text is not None: raise ValueError( """Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?""" ) if audio_target is not None and text_target is not None: raise ValueError( """Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?""" ) if audio is None and audio_target is None and text is None and text_target is None: raise ValueError( """You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.""" ) if audio is not None: _lowerCAmelCase = self.feature_extractor(lowerCamelCase , *lowerCamelCase , sampling_rate=lowerCamelCase , **lowerCamelCase ) elif text is not None: _lowerCAmelCase = self.tokenizer(lowerCamelCase , **lowerCamelCase ) else: _lowerCAmelCase = None if audio_target is not None: _lowerCAmelCase = self.feature_extractor(audio_target=lowerCamelCase , *lowerCamelCase , sampling_rate=lowerCamelCase , **lowerCamelCase ) _lowerCAmelCase = targets["""input_values"""] elif text_target is not None: _lowerCAmelCase = self.tokenizer(lowerCamelCase , **lowerCamelCase ) _lowerCAmelCase = targets["""input_ids"""] else: _lowerCAmelCase = None if inputs is None: return targets if targets is not None: _lowerCAmelCase = labels _lowerCAmelCase = targets.get("""attention_mask""" ) if decoder_attention_mask is not None: _lowerCAmelCase = decoder_attention_mask return inputs def A__ (self , *lowerCamelCase , **lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = kwargs.pop("""input_values""" , lowerCamelCase ) _lowerCAmelCase = kwargs.pop("""input_ids""" , lowerCamelCase ) _lowerCAmelCase = kwargs.pop("""labels""" , lowerCamelCase ) if input_values is not None and input_ids is not None: raise ValueError("""Cannot process both `input_values` and `input_ids` inputs.""" ) if input_values is None and input_ids is None and labels is None: raise ValueError( """You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.""" ) if input_values is not None: _lowerCAmelCase = self.feature_extractor.pad(lowerCamelCase , *lowerCamelCase , **lowerCamelCase ) elif input_ids is not None: _lowerCAmelCase = self.tokenizer.pad(lowerCamelCase , **lowerCamelCase ) else: _lowerCAmelCase = None if labels is not None: if "input_ids" in labels or (isinstance(lowerCamelCase , lowerCamelCase ) and "input_ids" in labels[0]): _lowerCAmelCase = self.tokenizer.pad(lowerCamelCase , **lowerCamelCase ) _lowerCAmelCase = targets["""input_ids"""] else: _lowerCAmelCase = self.feature_extractor.feature_size _lowerCAmelCase = self.feature_extractor.num_mel_bins _lowerCAmelCase = self.feature_extractor.pad(lowerCamelCase , *lowerCamelCase , **lowerCamelCase ) _lowerCAmelCase = feature_size_hack _lowerCAmelCase = targets["""input_values"""] else: 
_lowerCAmelCase = None if inputs is None: return targets if targets is not None: _lowerCAmelCase = labels _lowerCAmelCase = targets.get("""attention_mask""" ) if decoder_attention_mask is not None: _lowerCAmelCase = decoder_attention_mask return inputs def A__ (self , *lowerCamelCase , **lowerCamelCase ): '''simple docstring''' return self.tokenizer.batch_decode(*lowerCamelCase , **lowerCamelCase ) def A__ (self , *lowerCamelCase , **lowerCamelCase ): '''simple docstring''' return self.tokenizer.decode(*lowerCamelCase , **lowerCamelCase )
317
"""simple docstring""" import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters SCREAMING_SNAKE_CASE : Any = (7_2_0, 1_2_8_0) # Height, Width SCREAMING_SNAKE_CASE : List[str] = (0.4, 0.6) # if height or width lower than this scale, drop it. SCREAMING_SNAKE_CASE : List[Any] = 1 / 1_0_0 SCREAMING_SNAKE_CASE : Optional[Any] = '''''' SCREAMING_SNAKE_CASE : Dict = '''''' SCREAMING_SNAKE_CASE : List[Any] = '''''' SCREAMING_SNAKE_CASE : Dict = 2_5_0 def __UpperCAmelCase ( ) -> None: """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = get_dataset(snake_case_ , snake_case_ ) for index in range(snake_case_ ): _lowerCAmelCase = random.sample(range(len(snake_case_ ) ) , 4 ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = update_image_and_anno( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , filter_scale=snake_case_ , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' _lowerCAmelCase = random_chars(32 ) _lowerCAmelCase = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0] _lowerCAmelCase = F"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}""" cva.imwrite(F"""{file_root}.jpg""" , snake_case_ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(F"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" ) _lowerCAmelCase = [] for anno in new_annos: _lowerCAmelCase = anno[3] - anno[1] _lowerCAmelCase = anno[4] - anno[2] _lowerCAmelCase = anno[1] + width / 2 _lowerCAmelCase = anno[2] + height / 2 _lowerCAmelCase = F"""{anno[0]} {x_center} {y_center} {width} {height}""" annos_list.append(snake_case_ ) with open(F"""{file_root}.txt""" , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def __UpperCAmelCase ( snake_case_ : str , snake_case_ : str ) -> tuple[list, list]: """simple docstring""" _lowerCAmelCase = [] _lowerCAmelCase = [] for label_file in glob.glob(os.path.join(snake_case_ , """*.txt""" ) ): _lowerCAmelCase = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(snake_case_ ) as in_file: _lowerCAmelCase = in_file.readlines() _lowerCAmelCase = os.path.join(snake_case_ , F"""{label_name}.jpg""" ) _lowerCAmelCase = [] for obj_list in obj_lists: _lowerCAmelCase = obj_list.rstrip("""\n""" ).split(""" """ ) _lowerCAmelCase = float(obj[1] ) - float(obj[3] ) / 2 _lowerCAmelCase = float(obj[2] ) - float(obj[4] ) / 2 _lowerCAmelCase = float(obj[1] ) + float(obj[3] ) / 2 _lowerCAmelCase = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(snake_case_ ) labels.append(snake_case_ ) return img_paths, labels def __UpperCAmelCase ( snake_case_ : list , snake_case_ : list , snake_case_ : list[int] , snake_case_ : tuple[int, int] , snake_case_ : tuple[float, float] , snake_case_ : float = 0.0 , ) -> tuple[list, list, str]: """simple docstring""" _lowerCAmelCase = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) _lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) _lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) _lowerCAmelCase = int(scale_x * output_size[1] ) _lowerCAmelCase = int(scale_y * output_size[0] ) _lowerCAmelCase = [] _lowerCAmelCase = [] for i, index in enumerate(snake_case_ ): _lowerCAmelCase = all_img_list[index] path_list.append(snake_case_ ) _lowerCAmelCase = all_annos[index] _lowerCAmelCase = cva.imread(snake_case_ ) if i == 0: # top-left _lowerCAmelCase = cva.resize(snake_case_ , (divid_point_x, 
divid_point_y) ) _lowerCAmelCase = img for bbox in img_annos: _lowerCAmelCase = bbox[1] * scale_x _lowerCAmelCase = bbox[2] * scale_y _lowerCAmelCase = bbox[3] * scale_x _lowerCAmelCase = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right _lowerCAmelCase = cva.resize(snake_case_ , (output_size[1] - divid_point_x, divid_point_y) ) _lowerCAmelCase = img for bbox in img_annos: _lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x) _lowerCAmelCase = bbox[2] * scale_y _lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x) _lowerCAmelCase = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left _lowerCAmelCase = cva.resize(snake_case_ , (divid_point_x, output_size[0] - divid_point_y) ) _lowerCAmelCase = img for bbox in img_annos: _lowerCAmelCase = bbox[1] * scale_x _lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y) _lowerCAmelCase = bbox[3] * scale_x _lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right _lowerCAmelCase = cva.resize( snake_case_ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) _lowerCAmelCase = img for bbox in img_annos: _lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x) _lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y) _lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x) _lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: _lowerCAmelCase = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def __UpperCAmelCase ( snake_case_ : int ) -> str: """simple docstring""" assert number_char > 1, "The number of character should greater than 1" _lowerCAmelCase = ascii_lowercase + digits return "".join(random.choice(snake_case_ ) for _ in range(snake_case_ ) ) if __name__ == "__main__": main() print('''DONE ✅''')
317
1
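The mosaic record in the row above resizes four source images into the quadrants of one canvas and remaps each normalized bounding box into its quadrant; for the top-left quadrant that is a plain scaling by (scale_x, scale_y). A minimal sketch of that one step, with hypothetical names of my own (the record itself works on [label, xmin, ymin, xmax, ymax] lists):

def remap_top_left(bbox, scale_x, scale_y):
    # bbox holds (label, xmin, ymin, xmax, ymax) in normalized [0, 1] coordinates
    label, xmin, ymin, xmax, ymax = bbox
    return (label, xmin * scale_x, ymin * scale_y, xmax * scale_x, ymax * scale_y)

print(remap_top_left((0, 0.2, 0.4, 0.6, 0.8), 0.5, 0.5))
# (0, 0.1, 0.2, 0.3, 0.4): the box shrinks into the top-left quadrant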
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : str ) -> list: """simple docstring""" _lowerCAmelCase = [0] * len(snake_case_ ) for i in range(1 , len(snake_case_ ) ): # use last results for better performance - dynamic programming _lowerCAmelCase = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: _lowerCAmelCase = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 _lowerCAmelCase = j return prefix_result def __UpperCAmelCase ( snake_case_ : str ) -> int: """simple docstring""" return max(prefix_function(snake_case_ ) ) if __name__ == "__main__": import doctest doctest.testmod()
317
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. SCREAMING_SNAKE_CASE : Dict = abspath(join(dirname(dirname(__file__)), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def __UpperCAmelCase ( snake_case_ : Optional[int] ) -> List[str]: """simple docstring""" from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(snake_case_ ) def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> int: """simple docstring""" from diffusers.utils.testing_utils import pytest_terminal_summary_main _lowerCAmelCase = terminalreporter.config.getoption("""--make-reports""" ) if make_reports: pytest_terminal_summary_main(snake_case_ , id=snake_case_ )
317
1
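The first record in the row above is the Knuth-Morris-Pratt prefix function with obfuscated identifiers (note both functions are named __UpperCAmelCase, so the second shadows the first). A de-obfuscated sketch with names of my own, mirroring the record's logic:

def prefix_function(s: str) -> list[int]:
    pi = [0] * len(s)  # pi[i] = length of the longest proper prefix of s[: i + 1] that is also a suffix
    for i in range(1, len(s)):
        j = pi[i - 1]  # reuse the previous result (dynamic programming)
        while j > 0 and s[i] != s[j]:
            j = pi[j - 1]
        if s[i] == s[j]:
            j += 1
        pi[i] = j
    return pi

print(prefix_function("aabcdaabc"))       # [0, 1, 0, 0, 0, 1, 2, 3, 4]
print(max(prefix_function("aabcdaabc")))  # 4 -- what the record's second helper computes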
"""simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} SCREAMING_SNAKE_CASE : List[Any] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def __UpperCAmelCase ( snake_case_ : dict[int, list[int]] , snake_case_ : int , snake_case_ : list[bool] ) -> list[int]: """simple docstring""" _lowerCAmelCase = True _lowerCAmelCase = [] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(snake_case_ , snake_case_ , snake_case_ ) order.append(snake_case_ ) return order def __UpperCAmelCase ( snake_case_ : dict[int, list[int]] , snake_case_ : int , snake_case_ : list[bool] ) -> list[int]: """simple docstring""" _lowerCAmelCase = True _lowerCAmelCase = [vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(snake_case_ , snake_case_ , snake_case_ ) return component def __UpperCAmelCase ( snake_case_ : dict[int, list[int]] ) -> list[list[int]]: """simple docstring""" _lowerCAmelCase = len(snake_case_ ) * [False] _lowerCAmelCase = {vert: [] for vert in range(len(snake_case_ ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(snake_case_ ) _lowerCAmelCase = [] for i, was_visited in enumerate(snake_case_ ): if not was_visited: order += topology_sort(snake_case_ , snake_case_ , snake_case_ ) _lowerCAmelCase = [] _lowerCAmelCase = len(snake_case_ ) * [False] for i in range(len(snake_case_ ) ): _lowerCAmelCase = order[len(snake_case_ ) - i - 1] if not visited[vert]: _lowerCAmelCase = find_components(snake_case_ , snake_case_ , snake_case_ ) components_list.append(snake_case_ ) return components_list
317
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool SCREAMING_SNAKE_CASE : Optional[Any] = { '''Acehnese Arabic''': '''ace_Arab''', '''Acehnese Latin''': '''ace_Latn''', '''Mesopotamian Arabic''': '''acm_Arab''', '''Ta\'izzi-Adeni Arabic''': '''acq_Arab''', '''Tunisian Arabic''': '''aeb_Arab''', '''Afrikaans''': '''afr_Latn''', '''South Levantine Arabic''': '''ajp_Arab''', '''Akan''': '''aka_Latn''', '''Amharic''': '''amh_Ethi''', '''North Levantine Arabic''': '''apc_Arab''', '''Modern Standard Arabic''': '''arb_Arab''', '''Modern Standard Arabic Romanized''': '''arb_Latn''', '''Najdi Arabic''': '''ars_Arab''', '''Moroccan Arabic''': '''ary_Arab''', '''Egyptian Arabic''': '''arz_Arab''', '''Assamese''': '''asm_Beng''', '''Asturian''': '''ast_Latn''', '''Awadhi''': '''awa_Deva''', '''Central Aymara''': '''ayr_Latn''', '''South Azerbaijani''': '''azb_Arab''', '''North Azerbaijani''': '''azj_Latn''', '''Bashkir''': '''bak_Cyrl''', '''Bambara''': '''bam_Latn''', '''Balinese''': '''ban_Latn''', '''Belarusian''': '''bel_Cyrl''', '''Bemba''': '''bem_Latn''', '''Bengali''': '''ben_Beng''', '''Bhojpuri''': '''bho_Deva''', '''Banjar Arabic''': '''bjn_Arab''', '''Banjar Latin''': '''bjn_Latn''', '''Standard Tibetan''': '''bod_Tibt''', '''Bosnian''': '''bos_Latn''', '''Buginese''': '''bug_Latn''', '''Bulgarian''': '''bul_Cyrl''', '''Catalan''': '''cat_Latn''', '''Cebuano''': '''ceb_Latn''', '''Czech''': '''ces_Latn''', '''Chokwe''': '''cjk_Latn''', '''Central Kurdish''': '''ckb_Arab''', '''Crimean Tatar''': '''crh_Latn''', '''Welsh''': '''cym_Latn''', '''Danish''': '''dan_Latn''', '''German''': '''deu_Latn''', '''Southwestern Dinka''': '''dik_Latn''', '''Dyula''': '''dyu_Latn''', '''Dzongkha''': '''dzo_Tibt''', '''Greek''': '''ell_Grek''', '''English''': '''eng_Latn''', '''Esperanto''': '''epo_Latn''', '''Estonian''': '''est_Latn''', '''Basque''': '''eus_Latn''', '''Ewe''': '''ewe_Latn''', '''Faroese''': '''fao_Latn''', '''Fijian''': '''fij_Latn''', '''Finnish''': '''fin_Latn''', '''Fon''': '''fon_Latn''', '''French''': '''fra_Latn''', '''Friulian''': '''fur_Latn''', '''Nigerian Fulfulde''': '''fuv_Latn''', '''Scottish Gaelic''': '''gla_Latn''', '''Irish''': '''gle_Latn''', '''Galician''': '''glg_Latn''', '''Guarani''': '''grn_Latn''', '''Gujarati''': '''guj_Gujr''', '''Haitian Creole''': '''hat_Latn''', '''Hausa''': '''hau_Latn''', '''Hebrew''': '''heb_Hebr''', '''Hindi''': '''hin_Deva''', '''Chhattisgarhi''': '''hne_Deva''', '''Croatian''': '''hrv_Latn''', '''Hungarian''': '''hun_Latn''', '''Armenian''': '''hye_Armn''', '''Igbo''': '''ibo_Latn''', '''Ilocano''': '''ilo_Latn''', '''Indonesian''': '''ind_Latn''', '''Icelandic''': '''isl_Latn''', '''Italian''': '''ita_Latn''', '''Javanese''': '''jav_Latn''', '''Japanese''': '''jpn_Jpan''', '''Kabyle''': '''kab_Latn''', '''Jingpho''': '''kac_Latn''', '''Kamba''': '''kam_Latn''', '''Kannada''': 
'''kan_Knda''', '''Kashmiri Arabic''': '''kas_Arab''', '''Kashmiri Devanagari''': '''kas_Deva''', '''Georgian''': '''kat_Geor''', '''Central Kanuri Arabic''': '''knc_Arab''', '''Central Kanuri Latin''': '''knc_Latn''', '''Kazakh''': '''kaz_Cyrl''', '''Kabiyè''': '''kbp_Latn''', '''Kabuverdianu''': '''kea_Latn''', '''Khmer''': '''khm_Khmr''', '''Kikuyu''': '''kik_Latn''', '''Kinyarwanda''': '''kin_Latn''', '''Kyrgyz''': '''kir_Cyrl''', '''Kimbundu''': '''kmb_Latn''', '''Northern Kurdish''': '''kmr_Latn''', '''Kikongo''': '''kon_Latn''', '''Korean''': '''kor_Hang''', '''Lao''': '''lao_Laoo''', '''Ligurian''': '''lij_Latn''', '''Limburgish''': '''lim_Latn''', '''Lingala''': '''lin_Latn''', '''Lithuanian''': '''lit_Latn''', '''Lombard''': '''lmo_Latn''', '''Latgalian''': '''ltg_Latn''', '''Luxembourgish''': '''ltz_Latn''', '''Luba-Kasai''': '''lua_Latn''', '''Ganda''': '''lug_Latn''', '''Luo''': '''luo_Latn''', '''Mizo''': '''lus_Latn''', '''Standard Latvian''': '''lvs_Latn''', '''Magahi''': '''mag_Deva''', '''Maithili''': '''mai_Deva''', '''Malayalam''': '''mal_Mlym''', '''Marathi''': '''mar_Deva''', '''Minangkabau Arabic ''': '''min_Arab''', '''Minangkabau Latin''': '''min_Latn''', '''Macedonian''': '''mkd_Cyrl''', '''Plateau Malagasy''': '''plt_Latn''', '''Maltese''': '''mlt_Latn''', '''Meitei Bengali''': '''mni_Beng''', '''Halh Mongolian''': '''khk_Cyrl''', '''Mossi''': '''mos_Latn''', '''Maori''': '''mri_Latn''', '''Burmese''': '''mya_Mymr''', '''Dutch''': '''nld_Latn''', '''Norwegian Nynorsk''': '''nno_Latn''', '''Norwegian Bokmål''': '''nob_Latn''', '''Nepali''': '''npi_Deva''', '''Northern Sotho''': '''nso_Latn''', '''Nuer''': '''nus_Latn''', '''Nyanja''': '''nya_Latn''', '''Occitan''': '''oci_Latn''', '''West Central Oromo''': '''gaz_Latn''', '''Odia''': '''ory_Orya''', '''Pangasinan''': '''pag_Latn''', '''Eastern Panjabi''': '''pan_Guru''', '''Papiamento''': '''pap_Latn''', '''Western Persian''': '''pes_Arab''', '''Polish''': '''pol_Latn''', '''Portuguese''': '''por_Latn''', '''Dari''': '''prs_Arab''', '''Southern Pashto''': '''pbt_Arab''', '''Ayacucho Quechua''': '''quy_Latn''', '''Romanian''': '''ron_Latn''', '''Rundi''': '''run_Latn''', '''Russian''': '''rus_Cyrl''', '''Sango''': '''sag_Latn''', '''Sanskrit''': '''san_Deva''', '''Santali''': '''sat_Olck''', '''Sicilian''': '''scn_Latn''', '''Shan''': '''shn_Mymr''', '''Sinhala''': '''sin_Sinh''', '''Slovak''': '''slk_Latn''', '''Slovenian''': '''slv_Latn''', '''Samoan''': '''smo_Latn''', '''Shona''': '''sna_Latn''', '''Sindhi''': '''snd_Arab''', '''Somali''': '''som_Latn''', '''Southern Sotho''': '''sot_Latn''', '''Spanish''': '''spa_Latn''', '''Tosk Albanian''': '''als_Latn''', '''Sardinian''': '''srd_Latn''', '''Serbian''': '''srp_Cyrl''', '''Swati''': '''ssw_Latn''', '''Sundanese''': '''sun_Latn''', '''Swedish''': '''swe_Latn''', '''Swahili''': '''swh_Latn''', '''Silesian''': '''szl_Latn''', '''Tamil''': '''tam_Taml''', '''Tatar''': '''tat_Cyrl''', '''Telugu''': '''tel_Telu''', '''Tajik''': '''tgk_Cyrl''', '''Tagalog''': '''tgl_Latn''', '''Thai''': '''tha_Thai''', '''Tigrinya''': '''tir_Ethi''', '''Tamasheq Latin''': '''taq_Latn''', '''Tamasheq Tifinagh''': '''taq_Tfng''', '''Tok Pisin''': '''tpi_Latn''', '''Tswana''': '''tsn_Latn''', '''Tsonga''': '''tso_Latn''', '''Turkmen''': '''tuk_Latn''', '''Tumbuka''': '''tum_Latn''', '''Turkish''': '''tur_Latn''', '''Twi''': '''twi_Latn''', '''Central Atlas Tamazight''': '''tzm_Tfng''', '''Uyghur''': '''uig_Arab''', '''Ukrainian''': '''ukr_Cyrl''', '''Umbundu''': '''umb_Latn''', 
'''Urdu''': '''urd_Arab''', '''Northern Uzbek''': '''uzn_Latn''', '''Venetian''': '''vec_Latn''', '''Vietnamese''': '''vie_Latn''', '''Waray''': '''war_Latn''', '''Wolof''': '''wol_Latn''', '''Xhosa''': '''xho_Latn''', '''Eastern Yiddish''': '''ydd_Hebr''', '''Yoruba''': '''yor_Latn''', '''Yue Chinese''': '''yue_Hant''', '''Chinese Simplified''': '''zho_Hans''', '''Chinese Traditional''': '''zho_Hant''', '''Standard Malay''': '''zsm_Latn''', '''Zulu''': '''zul_Latn''', } class __lowerCamelCase ( __lowercase ): __UpperCamelCase = 'facebook/nllb-200-distilled-600M' __UpperCamelCase = ( 'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should ' 'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ' 'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in ' 'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.' ) __UpperCamelCase = 'translator' __UpperCamelCase = AutoTokenizer __UpperCamelCase = AutoModelForSeqaSeqLM __UpperCamelCase = LANGUAGE_CODES __UpperCamelCase = ['text', 'text', 'text'] __UpperCamelCase = ['text'] def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if src_lang not in self.lang_to_code: raise ValueError(f"""{src_lang} is not a supported language.""" ) if tgt_lang not in self.lang_to_code: raise ValueError(f"""{tgt_lang} is not a supported language.""" ) _lowerCAmelCase = self.lang_to_code[src_lang] _lowerCAmelCase = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( lowerCamelCase , return_tensors="""pt""" , src_lang=lowerCamelCase , tgt_lang=lowerCamelCase ) def A__ (self , lowerCamelCase ): '''simple docstring''' return self.model.generate(**lowerCamelCase ) def A__ (self , lowerCamelCase ): '''simple docstring''' return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowerCamelCase )
317
1
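The graph record in the row above implements Kosaraju's two-pass strongly-connected-components algorithm. A readable sketch with my own identifiers, checked against the record's first test graph:

def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    n = len(graph)
    reversed_graph: dict[int, list[int]] = {v: [] for v in range(n)}
    for v, neighbours in graph.items():
        for u in neighbours:
            reversed_graph[u].append(v)

    visited = [False] * n
    order: list[int] = []

    def record_finish_order(v: int) -> None:  # pass 1: DFS on the original graph
        visited[v] = True
        for u in graph[v]:
            if not visited[u]:
                record_finish_order(u)
        order.append(v)

    for v in range(n):
        if not visited[v]:
            record_finish_order(v)

    seen = [False] * n
    components: list[list[int]] = []

    def collect(v: int) -> list[int]:  # pass 2: DFS on the reversed graph
        seen[v] = True
        component = [v]
        for u in reversed_graph[v]:
            if not seen[u]:
                component += collect(u)
        return component

    for v in reversed(order):  # highest finish time first
        if not seen[v]:
            components.append(collect(v))
    return components

print(strongly_connected_components({0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}))
# [[0, 1, 2], [3], [4]]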
"""simple docstring""" # DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput @dataclass class __lowerCamelCase ( __lowercase ): __UpperCamelCase = 42 __UpperCamelCase = 42 class __lowerCamelCase ( __lowercase , __lowercase ): __UpperCamelCase = 1 @register_to_config def __init__(self , lowerCamelCase = 2_000 , lowerCamelCase = 0.15 , lowerCamelCase = 0.01 , lowerCamelCase = 1348.0 , lowerCamelCase = 1e-5 , lowerCamelCase = 1 , ): '''simple docstring''' _lowerCAmelCase = sigma_max # setable values _lowerCAmelCase = None self.set_sigmas(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) def A__ (self , lowerCamelCase , lowerCamelCase = None ): '''simple docstring''' return sample def A__ (self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None ): '''simple docstring''' _lowerCAmelCase = sampling_eps if sampling_eps is not None else self.config.sampling_eps _lowerCAmelCase = torch.linspace(1 , lowerCamelCase , lowerCamelCase , device=lowerCamelCase ) def A__ (self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None ): '''simple docstring''' _lowerCAmelCase = sigma_min if sigma_min is not None else self.config.sigma_min _lowerCAmelCase = sigma_max if sigma_max is not None else self.config.sigma_max _lowerCAmelCase = sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) _lowerCAmelCase = torch.exp(torch.linspace(math.log(lowerCamelCase ) , math.log(lowerCamelCase ) , lowerCamelCase ) ) _lowerCAmelCase = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] ) def A__ (self , lowerCamelCase , lowerCamelCase ): '''simple docstring''' return torch.where( timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = True , ): '''simple docstring''' if self.timesteps is None: raise ValueError( """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" ) _lowerCAmelCase = timestep * torch.ones( sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0]) _lowerCAmelCase = (timestep * (len(self.timesteps ) - 1)).long() # mps requires indices to be in the same device, so we use cpu as is the default with cuda _lowerCAmelCase = timesteps.to(self.discrete_sigmas.device ) _lowerCAmelCase = self.discrete_sigmas[timesteps].to(sample.device ) _lowerCAmelCase = self.get_adjacent_sigma(lowerCamelCase , lowerCamelCase ).to(sample.device ) _lowerCAmelCase = torch.zeros_like(lowerCamelCase ) _lowerCAmelCase = (sigma**2 - adjacent_sigma**2) ** 0.5 # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) # also equation 47 shows the analog from SDE models to ancestral sampling methods _lowerCAmelCase = diffusion.flatten() while len(diffusion.shape ) < len(sample.shape ): _lowerCAmelCase = diffusion.unsqueeze(-1 ) _lowerCAmelCase = drift - diffusion**2 * model_output 
# equation 6: sample noise for the diffusion term of _lowerCAmelCase = randn_tensor( sample.shape , layout=sample.layout , generator=lowerCamelCase , device=sample.device , dtype=sample.dtype ) _lowerCAmelCase = sample - drift # subtract because `dt` is a small negative timestep # TODO is the variable diffusion the correct scaling term for the noise? _lowerCAmelCase = prev_sample_mean + diffusion * noise # add impact of diffusion field g if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=lowerCamelCase , prev_sample_mean=lowerCamelCase ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = True , ): '''simple docstring''' if self.timesteps is None: raise ValueError( """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" ) # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z" # sample noise for correction _lowerCAmelCase = randn_tensor(sample.shape , layout=sample.layout , generator=lowerCamelCase ).to(sample.device ) # compute step size from the model_output, the noise, and the snr _lowerCAmelCase = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean() _lowerCAmelCase = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean() _lowerCAmelCase = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 _lowerCAmelCase = step_size * torch.ones(sample.shape[0] ).to(sample.device ) # self.repeat_scalar(step_size, sample.shape[0]) # compute corrected sample: model_output term and noise term _lowerCAmelCase = step_size.flatten() while len(step_size.shape ) < len(sample.shape ): _lowerCAmelCase = step_size.unsqueeze(-1 ) _lowerCAmelCase = sample + step_size * model_output _lowerCAmelCase = prev_sample_mean + ((step_size * 2) ** 0.5) * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=lowerCamelCase ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , ): '''simple docstring''' _lowerCAmelCase = timesteps.to(original_samples.device ) _lowerCAmelCase = self.discrete_sigmas.to(original_samples.device )[timesteps] _lowerCAmelCase = ( noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(lowerCamelCase ) * sigmas[:, None, None, None] ) _lowerCAmelCase = noise + original_samples return noisy_samples def __len__(self ): '''simple docstring''' return self.config.num_train_timesteps
317
"""simple docstring""" from math import isqrt def __UpperCAmelCase ( snake_case_ : int ) -> list[int]: """simple docstring""" _lowerCAmelCase = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , snake_case_ , snake_case_ ): _lowerCAmelCase = False return [i for i in range(2 , snake_case_ ) if is_prime[i]] def __UpperCAmelCase ( snake_case_ : int = 10**8 ) -> int: """simple docstring""" _lowerCAmelCase = calculate_prime_numbers(max_number // 2 ) _lowerCAmelCase = 0 _lowerCAmelCase = 0 _lowerCAmelCase = len(snake_case_ ) - 1 while left <= right: while prime_numbers[left] * prime_numbers[right] >= max_number: right -= 1 semiprimes_count += right - left + 1 left += 1 return semiprimes_count if __name__ == "__main__": print(F'{solution() = }')
317
1
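The second record in the row above counts semiprimes below a limit with a two-pointer sweep over a prime list (Project Euler 187 style). A hand-checkable sketch, with my own names and the prime list passed in directly rather than sieved:

def count_semiprimes(limit: int, primes: list[int]) -> int:
    count, left, right = 0, 0, len(primes) - 1
    while left <= right:
        while primes[left] * primes[right] >= limit:
            right -= 1  # shrink until primes[left] * primes[right] fits below the limit
        count += right - left + 1  # every q in primes[left..right] gives p * q < limit
        left += 1
    return count

# Semiprimes below 30 are 4, 6, 9, 10, 14, 15, 21, 22, 25, 26.
print(count_semiprimes(30, [2, 3, 5, 7, 11, 13]))  # 10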
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : int = 600851475143 ) -> int: """simple docstring""" try: _lowerCAmelCase = int(snake_case_ ) except (TypeError, ValueError): raise TypeError("""Parameter n must be int or castable to int.""" ) if n <= 0: raise ValueError("""Parameter n must be greater than or equal to one.""" ) _lowerCAmelCase = 1 _lowerCAmelCase = 2 while i * i <= n: while n % i == 0: _lowerCAmelCase = i n //= i i += 1 if n > 1: _lowerCAmelCase = n return int(snake_case_ ) if __name__ == "__main__": print(F'{solution() = }')
317
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class __lowerCamelCase ( __lowercase ): __UpperCamelCase = ( 'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.' 'It takes two arguments named `image` which should be the original image, and `label` which should be a text ' 'describing the elements what should be identified in the segmentation mask. The tool returns the mask.' ) __UpperCamelCase = 'CIDAS/clipseg-rd64-refined' __UpperCamelCase = 'image_segmenter' __UpperCamelCase = CLIPSegForImageSegmentation __UpperCamelCase = ['image', 'text'] __UpperCamelCase = ['image'] def __init__(self , *lowerCamelCase , **lowerCamelCase ): '''simple docstring''' requires_backends(self , ["""vision"""] ) super().__init__(*lowerCamelCase , **lowerCamelCase ) def A__ (self , lowerCamelCase , lowerCamelCase ): '''simple docstring''' return self.pre_processor(text=[label] , images=[image] , padding=lowerCamelCase , return_tensors="""pt""" ) def A__ (self , lowerCamelCase ): '''simple docstring''' with torch.no_grad(): _lowerCAmelCase = self.model(**lowerCamelCase ).logits return logits def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = outputs.cpu().detach().numpy() _lowerCAmelCase = 0 _lowerCAmelCase = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
317
1
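The first record in the row above is trial-division factorization for Project Euler 3. A de-obfuscated sketch of the same loop, names mine:

def largest_prime_factor(n: int) -> int:
    ans, i = 1, 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i  # strip every factor i before moving on
        i += 1
    if n > 1:  # whatever remains is a prime factor above sqrt(original n)
        ans = n
    return ans

print(largest_prime_factor(13195))         # 29 (factors: 5, 7, 13, 29)
print(largest_prime_factor(600851475143))  # 6857, the Project Euler 3 answer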
"""simple docstring""" from math import factorial SCREAMING_SNAKE_CASE : dict[str, int] = {str(digit): factorial(digit) for digit in range(1_0)} def __UpperCAmelCase ( snake_case_ : int ) -> int: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ): raise TypeError("""Parameter number must be int""" ) if number < 0: raise ValueError("""Parameter number must be greater than or equal to 0""" ) # Converts number in string to iterate on its digits and adds its factorial. return sum(DIGIT_FACTORIAL[digit] for digit in str(snake_case_ ) ) def __UpperCAmelCase ( snake_case_ : int = 60 , snake_case_ : int = 1000000 ) -> int: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not isinstance(snake_case_ , snake_case_ ): raise TypeError("""Parameters chain_length and number_limit must be int""" ) if chain_length <= 0 or number_limit <= 0: raise ValueError( """Parameters chain_length and number_limit must be greater than 0""" ) # the counter for the chains with the exact desired length _lowerCAmelCase = 0 # the cached sizes of the previous chains _lowerCAmelCase = {} for start_chain_element in range(1 , snake_case_ ): # The temporary set will contain the elements of the chain _lowerCAmelCase = set() _lowerCAmelCase = 0 # Stop computing the chain when you find a cached size, a repeating item or the # length is greater then the desired one. _lowerCAmelCase = start_chain_element while ( chain_element not in chain_sets_lengths and chain_element not in chain_set and chain_set_length <= chain_length ): chain_set.add(snake_case_ ) chain_set_length += 1 _lowerCAmelCase = digit_factorial_sum(snake_case_ ) if chain_element in chain_sets_lengths: chain_set_length += chain_sets_lengths[chain_element] _lowerCAmelCase = chain_set_length # If chain contains the exact amount of elements increase the counter if chain_set_length == chain_length: chains_counter += 1 return chains_counter if __name__ == "__main__": import doctest doctest.testmod() print(F'{solution()}')
317
"""simple docstring""" from __future__ import annotations import queue class __lowerCamelCase : def __init__(self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = data _lowerCAmelCase = None _lowerCAmelCase = None def __UpperCAmelCase ( ) -> TreeNode: """simple docstring""" print("""\n********Press N to stop entering at any point of time********\n""" ) _lowerCAmelCase = input("""Enter the value of the root node: """ ).strip().lower() _lowerCAmelCase = queue.Queue() _lowerCAmelCase = TreeNode(int(snake_case_ ) ) q.put(snake_case_ ) while not q.empty(): _lowerCAmelCase = q.get() _lowerCAmelCase = F"""Enter the left node of {node_found.data}: """ _lowerCAmelCase = input(snake_case_ ).strip().lower() or """n""" if check == "n": return tree_node _lowerCAmelCase = TreeNode(int(snake_case_ ) ) _lowerCAmelCase = left_node q.put(snake_case_ ) _lowerCAmelCase = F"""Enter the right node of {node_found.data}: """ _lowerCAmelCase = input(snake_case_ ).strip().lower() or """n""" if check == "n": return tree_node _lowerCAmelCase = TreeNode(int(snake_case_ ) ) _lowerCAmelCase = right_node q.put(snake_case_ ) raise def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return print(node.data , end=""",""" ) pre_order(node.left ) pre_order(node.right ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return in_order(node.left ) print(node.data , end=""",""" ) in_order(node.right ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=""",""" ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase = queue.Queue() q.put(snake_case_ ) while not q.empty(): _lowerCAmelCase = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase = queue.Queue() q.put(snake_case_ ) while not q.empty(): _lowerCAmelCase = [] while not q.empty(): _lowerCAmelCase = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(snake_case_ ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase = [] _lowerCAmelCase = node while n or stack: while n: # start from root node, find its left child print(n.data , end=""",""" ) stack.append(snake_case_ ) _lowerCAmelCase = n.left # end of while means current node doesn't have left child _lowerCAmelCase = stack.pop() # start to traverse its right child _lowerCAmelCase = n.right def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase = [] _lowerCAmelCase = node while n or stack: while n: stack.append(snake_case_ ) _lowerCAmelCase = n.left _lowerCAmelCase = stack.pop() print(n.data , end=""",""" ) _lowerCAmelCase = n.right def __UpperCAmelCase ( 
snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase , _lowerCAmelCase = [], [] _lowerCAmelCase = node stacka.append(snake_case_ ) while stacka: # to find the reversed order of post order, store it in stack2 _lowerCAmelCase = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(snake_case_ ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=""",""" ) def __UpperCAmelCase ( snake_case_ : str = "" , snake_case_ : int=50 , snake_case_ : Dict="*" ) -> str: """simple docstring""" if not s: return "\n" + width * char _lowerCAmelCase , _lowerCAmelCase = divmod(width - len(snake_case_ ) - 2 , 2 ) return F"""{left * char} {s} {(left + extra) * char}""" if __name__ == "__main__": import doctest doctest.testmod() print(prompt('''Binary Tree Traversals''')) SCREAMING_SNAKE_CASE : TreeNode = build_tree() print(prompt('''Pre Order Traversal''')) pre_order(node) print(prompt() + '''\n''') print(prompt('''In Order Traversal''')) in_order(node) print(prompt() + '''\n''') print(prompt('''Post Order Traversal''')) post_order(node) print(prompt() + '''\n''') print(prompt('''Level Order Traversal''')) level_order(node) print(prompt() + '''\n''') print(prompt('''Actual Level Order Traversal''')) level_order_actual(node) print('''*''' * 5_0 + '''\n''') print(prompt('''Pre Order Traversal - Iteration Version''')) pre_order_iter(node) print(prompt() + '''\n''') print(prompt('''In Order Traversal - Iteration Version''')) in_order_iter(node) print(prompt() + '''\n''') print(prompt('''Post Order Traversal - Iteration Version''')) post_order_iter(node) print(prompt())
317
1
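The Euler 74 record in the row above hinges on one step: mapping a number to the sum of the factorials of its digits. A tiny sketch of that step with my own names:

from math import factorial

DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}

def digit_factorial_sum(number: int) -> int:
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))

print(digit_factorial_sum(145))  # 145: 1! + 4! + 5! = 1 + 24 + 120, a fixed point
print(digit_factorial_sum(169))  # 363601, the first step of the 169 -> 363601 -> 1454 -> 169 loop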
"""simple docstring""" import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) class __lowerCamelCase ( __lowercase ): def __init__(self , *lowerCamelCase , **lowerCamelCase ): '''simple docstring''' warnings.warn( """The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use YolosImageProcessor instead.""" , lowerCamelCase , ) super().__init__(*lowerCamelCase , **lowerCamelCase )
317
"""simple docstring""" from __future__ import annotations class __lowerCamelCase : def __init__(self , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase = text, pattern _lowerCAmelCase , _lowerCAmelCase = len(lowerCamelCase ), len(lowerCamelCase ) def A__ (self , lowerCamelCase ): '''simple docstring''' for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def A__ (self , lowerCamelCase ): '''simple docstring''' for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def A__ (self ): '''simple docstring''' _lowerCAmelCase = [] for i in range(self.textLen - self.patLen + 1 ): _lowerCAmelCase = self.mismatch_in_text(lowerCamelCase ) if mismatch_index == -1: positions.append(lowerCamelCase ) else: _lowerCAmelCase = self.match_in_pattern(self.text[mismatch_index] ) _lowerCAmelCase = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions SCREAMING_SNAKE_CASE : Any = '''ABAABA''' SCREAMING_SNAKE_CASE : Optional[int] = '''AB''' SCREAMING_SNAKE_CASE : str = BoyerMooreSearch(text, pattern) SCREAMING_SNAKE_CASE : Tuple = bms.bad_character_heuristic() if len(positions) == 0: print('''No match found''') else: print('''Pattern found in following positions: ''') print(positions)
317
1
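In the Boyer-Moore record in the row above, every alignment is checked right to left, and the bad-character shift is computed on a mismatch but never actually applied (the loop still advances one position at a time). A compact sketch of the matching behaviour, names mine:

def find_occurrences(text: str, pattern: str) -> list[int]:
    positions = []
    for i in range(len(text) - len(pattern) + 1):
        # compare right to left, as Boyer-Moore does
        if all(pattern[j] == text[i + j] for j in range(len(pattern) - 1, -1, -1)):
            positions.append(i)
    return positions

print(find_occurrences("ABAABA", "AB"))  # [0, 3], matching the record's demo strings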
"""simple docstring""" import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : str = ['''model.decoder.embed_positions.weights'''] def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> Tuple: """simple docstring""" if "emb" in name: _lowerCAmelCase = name.replace("""emb""" , """model.decoder.embed_tokens""" ) if "transformer" in name: _lowerCAmelCase = name.replace("""transformer""" , """model.decoder""" ) if "cross_attention" in name: _lowerCAmelCase = name.replace("""cross_attention""" , """encoder_attn""" ) if "linear1" in name: _lowerCAmelCase = name.replace("""linear1""" , """fc1""" ) if "linear2" in name: _lowerCAmelCase = name.replace("""linear2""" , """fc2""" ) if "norm1" in name: _lowerCAmelCase = name.replace("""norm1""" , """self_attn_layer_norm""" ) if "norm_cross" in name: _lowerCAmelCase = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" ) if "norm2" in name: _lowerCAmelCase = name.replace("""norm2""" , """final_layer_norm""" ) if "out_norm" in name: _lowerCAmelCase = name.replace("""out_norm""" , """model.decoder.layer_norm""" ) if "linears" in name: _lowerCAmelCase = name.replace("""linears""" , """lm_heads""" ) if "condition_provider.conditioners.description.output_proj" in name: _lowerCAmelCase = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" ) return name def __UpperCAmelCase ( snake_case_ : OrderedDict , snake_case_ : int ) -> Tuple[Dict, Dict]: """simple docstring""" _lowerCAmelCase = list(state_dict.keys() ) _lowerCAmelCase = {} for key in keys: _lowerCAmelCase = state_dict.pop(snake_case_ ) _lowerCAmelCase = rename_keys(snake_case_ ) if "in_proj_weight" in key: # split fused qkv proj _lowerCAmelCase = val[:hidden_size, :] _lowerCAmelCase = val[hidden_size : 2 * hidden_size, :] _lowerCAmelCase = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: _lowerCAmelCase = val else: _lowerCAmelCase = val return state_dict, enc_dec_proj_state_dict def __UpperCAmelCase ( snake_case_ : str ) -> MusicgenDecoderConfig: """simple docstring""" if checkpoint == "small": # default config values _lowerCAmelCase = 1024 _lowerCAmelCase = 24 _lowerCAmelCase = 16 elif checkpoint == "medium": _lowerCAmelCase = 1536 _lowerCAmelCase = 48 _lowerCAmelCase = 24 elif checkpoint == "large": _lowerCAmelCase = 2048 _lowerCAmelCase = 48 _lowerCAmelCase = 32 else: raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" ) _lowerCAmelCase = MusicgenDecoderConfig( hidden_size=snake_case_ , ffn_dim=hidden_size * 4 , num_hidden_layers=snake_case_ , num_attention_heads=snake_case_ , ) return config @torch.no_grad() def __UpperCAmelCase ( snake_case_ : List[str] , snake_case_ : int=None , snake_case_ : Tuple=None , snake_case_ : str="cpu" ) -> Any: """simple docstring""" _lowerCAmelCase = MusicGen.get_pretrained(snake_case_ , device=snake_case_ ) _lowerCAmelCase = decoder_config_from_checkpoint(snake_case_ ) _lowerCAmelCase = fairseq_model.lm.state_dict() _lowerCAmelCase , _lowerCAmelCase = rename_state_dict( snake_case_ , 
hidden_size=decoder_config.hidden_size ) _lowerCAmelCase = TaEncoderModel.from_pretrained("""t5-base""" ) _lowerCAmelCase = EncodecModel.from_pretrained("""facebook/encodec_32khz""" ) _lowerCAmelCase = MusicgenForCausalLM(snake_case_ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection _lowerCAmelCase , _lowerCAmelCase = decoder.load_state_dict(snake_case_ , strict=snake_case_ ) for key in missing_keys.copy(): if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(snake_case_ ) if len(snake_case_ ) > 0: raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" ) if len(snake_case_ ) > 0: raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" ) # init the composite model _lowerCAmelCase = MusicgenForConditionalGeneration(text_encoder=snake_case_ , audio_encoder=snake_case_ , decoder=snake_case_ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(snake_case_ ) # check we can do a forward pass _lowerCAmelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) _lowerCAmelCase = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): _lowerCAmelCase = model(input_ids=snake_case_ , decoder_input_ids=snake_case_ ).logits if logits.shape != (8, 1, 2048): raise ValueError("""Incorrect shape for logits""" ) # now construct the processor _lowerCAmelCase = AutoTokenizer.from_pretrained("""t5-base""" ) _lowerCAmelCase = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" ) _lowerCAmelCase = MusicgenProcessor(feature_extractor=snake_case_ , tokenizer=snake_case_ ) # set the appropriate bos/pad token ids _lowerCAmelCase = 2048 _lowerCAmelCase = 2048 # set other default generation config params _lowerCAmelCase = int(30 * audio_encoder.config.frame_rate ) _lowerCAmelCase = True _lowerCAmelCase = 3.0 if pytorch_dump_folder is not None: Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" ) model.save_pretrained(snake_case_ ) processor.save_pretrained(snake_case_ ) if repo_id: logger.info(F"""Pushing model {checkpoint} to {repo_id}""" ) model.push_to_hub(snake_case_ ) processor.push_to_hub(snake_case_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint''', default='''small''', type=str, help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''', ) parser.add_argument( '''--pytorch_dump_folder''', required=True, default=None, type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) parser.add_argument( '''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.''' ) SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
317
"""simple docstring""" import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device SCREAMING_SNAKE_CASE : List[str] = False class __lowerCamelCase ( unittest.TestCase ): pass @slow @require_torch_gpu class __lowerCamelCase ( unittest.TestCase ): def A__ (self ): '''simple docstring''' _lowerCAmelCase = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) _lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe( image=lowerCamelCase , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images _lowerCAmelCase = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _lowerCAmelCase = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
317
1
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : Optional[Any] ) -> Any: """simple docstring""" _lowerCAmelCase = [] _lowerCAmelCase = set({"""(""", """[""", """{"""} ) _lowerCAmelCase = set({""")""", """]""", """}"""} ) _lowerCAmelCase = {"""{""": """}""", """[""": """]""", """(""": """)"""} for i in range(len(snake_case_ ) ): if s[i] in open_brackets: stack.append(s[i] ) elif s[i] in closed_brackets and ( len(snake_case_ ) == 0 or (len(snake_case_ ) > 0 and open_to_closed[stack.pop()] != s[i]) ): return False return len(snake_case_ ) == 0 def __UpperCAmelCase ( ) -> str: """simple docstring""" _lowerCAmelCase = input("""Enter sequence of brackets: """ ) if is_balanced(snake_case_ ): print(snake_case_ , """is balanced""" ) else: print(snake_case_ , """is not balanced""" ) if __name__ == "__main__": main()
317
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __lowerCamelCase ( __lowercase , unittest.TestCase ): __UpperCamelCase = DiTPipeline __UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS __UpperCamelCase = PipelineTesterMixin.required_optional_params - { 'latents', 'num_images_per_prompt', 'callback', 'callback_steps', } __UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS __UpperCamelCase = False def A__ (self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCamelCase , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1_000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=lowerCamelCase , ) _lowerCAmelCase = AutoencoderKL() _lowerCAmelCase = DDIMScheduler() _lowerCAmelCase = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler} return components def A__ (self , lowerCamelCase , lowerCamelCase=0 ): '''simple docstring''' if str(lowerCamelCase ).startswith("""mps""" ): _lowerCAmelCase = torch.manual_seed(lowerCamelCase ) else: _lowerCAmelCase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) _lowerCAmelCase = { """class_labels""": [1], """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def A__ (self ): '''simple docstring''' _lowerCAmelCase = """cpu""" _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = self.pipeline_class(**lowerCamelCase ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) _lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase ) _lowerCAmelCase = pipe(**lowerCamelCase ).images _lowerCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _lowerCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) _lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCamelCase , 1e-3 ) def A__ (self ): '''simple docstring''' self._test_inference_batch_single_identical(relax_max_difference=lowerCamelCase , expected_max_diff=1e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def A__ (self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @require_torch_gpu @slow class __lowerCamelCase ( unittest.TestCase ): def A__ (self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ (self ): '''simple docstring''' _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" ) pipe.to("""cuda""" ) _lowerCAmelCase = ["""vase""", """umbrella""", """white shark""", """white wolf"""] _lowerCAmelCase = pipe.get_label_ids(lowerCamelCase ) 
_lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=40 , output_type="""np""" ).images for word, image in zip(lowerCamelCase , lowerCamelCase ): _lowerCAmelCase = load_numpy( f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" ) assert np.abs((expected_image - image).max() ) < 1e-2 def A__ (self ): '''simple docstring''' _lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" ) _lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("""cuda""" ) _lowerCAmelCase = ["""vase""", """umbrella"""] _lowerCAmelCase = pipe.get_label_ids(lowerCamelCase ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=25 , output_type="""np""" ).images for word, image in zip(lowerCamelCase , lowerCamelCase ): _lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" f"""/dit/{word}_512.npy""" ) assert np.abs((expected_image - image).max() ) < 1e-1
317
1
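The bracket-matcher record in the row above pushes openers on a stack and pops on closers. A de-obfuscated sketch, names mine:

def is_balanced(s: str) -> bool:
    stack = []
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    for char in s:
        if char in open_to_closed:
            stack.append(char)
        elif char in ")]}":
            if not stack or open_to_closed[stack.pop()] != char:
                return False
    return not stack  # leftover openers mean the sequence is unbalanced

print(is_balanced("([]{})"))  # True
print(is_balanced("([)]"))    # False: ')' arrives while '[' is on top of the stack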
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class __lowerCamelCase ( unittest.TestCase ): @slow def A__ (self ): '''simple docstring''' _lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" ) _lowerCAmelCase = AutoTokenizer.from_pretrained("""google/mt5-small""" ) _lowerCAmelCase = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids _lowerCAmelCase = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids _lowerCAmelCase = model(lowerCamelCase , labels=lowerCamelCase ).loss _lowerCAmelCase = -tf.math.reduce_mean(lowerCamelCase ).numpy() _lowerCAmelCase = -21.22_8168 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
317
"""simple docstring""" from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> Dict: """simple docstring""" return getitem, k def __UpperCAmelCase ( snake_case_ : Dict , snake_case_ : Union[str, Any] ) -> List[Any]: """simple docstring""" return setitem, k, v def __UpperCAmelCase ( snake_case_ : str ) -> Optional[int]: """simple docstring""" return delitem, k def __UpperCAmelCase ( snake_case_ : Optional[Any] , snake_case_ : Tuple , *snake_case_ : Tuple ) -> str: """simple docstring""" try: return fun(snake_case_ , *snake_case_ ), None except Exception as e: return None, e SCREAMING_SNAKE_CASE : int = ( _set('''key_a''', '''val_a'''), _set('''key_b''', '''val_b'''), ) SCREAMING_SNAKE_CASE : List[Any] = [ _set('''key_a''', '''val_a'''), _set('''key_a''', '''val_b'''), ] SCREAMING_SNAKE_CASE : Any = [ _set('''key_a''', '''val_a'''), _set('''key_b''', '''val_b'''), _del('''key_a'''), _del('''key_b'''), _set('''key_a''', '''val_a'''), _del('''key_a'''), ] SCREAMING_SNAKE_CASE : Union[str, Any] = [ _get('''key_a'''), _del('''key_a'''), _set('''key_a''', '''val_a'''), _del('''key_a'''), _del('''key_a'''), _get('''key_a'''), ] SCREAMING_SNAKE_CASE : Optional[Any] = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] SCREAMING_SNAKE_CASE : Optional[int] = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set('''key_a''', '''val_b'''), ] @pytest.mark.parametrize( """operations""" , ( pytest.param(_add_items , id="""add items""" ), pytest.param(_overwrite_items , id="""overwrite items""" ), pytest.param(_delete_items , id="""delete items""" ), pytest.param(_access_absent_items , id="""access absent items""" ), pytest.param(_add_with_resize_up , id="""add with resize up""" ), pytest.param(_add_with_resize_down , id="""add with resize down""" ), ) , ) def __UpperCAmelCase ( snake_case_ : List[Any] ) -> Tuple: """simple docstring""" _lowerCAmelCase = HashMap(initial_block_size=4 ) _lowerCAmelCase = {} for _, (fun, *args) in enumerate(snake_case_ ): _lowerCAmelCase , _lowerCAmelCase = _run_operation(snake_case_ , snake_case_ , *snake_case_ ) _lowerCAmelCase , _lowerCAmelCase = _run_operation(snake_case_ , snake_case_ , *snake_case_ ) assert my_res == py_res assert str(snake_case_ ) == str(snake_case_ ) assert set(snake_case_ ) == set(snake_case_ ) assert len(snake_case_ ) == len(snake_case_ ) assert set(my.items() ) == set(py.items() ) def __UpperCAmelCase ( ) -> Tuple: """simple docstring""" def is_public(snake_case_ : str ) -> bool: return not name.startswith("""_""" ) _lowerCAmelCase = {name for name in dir({} ) if is_public(snake_case_ )} _lowerCAmelCase = {name for name in dir(HashMap() ) if is_public(snake_case_ )} assert dict_public_names > hash_public_names
317
1
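The hash-map record in the row above replays one operation list against both the custom HashMap and a plain dict, then compares results and final state. A minimal sketch of that differential-testing pattern; the names are mine, and a plain dict stands in for the class under test:

from operator import getitem, setitem, delitem

def run(fun, mapping, *args):
    try:
        return fun(mapping, *args), None
    except Exception as e:
        return None, type(e)  # compare exception types, not instances

operations = [(setitem, "k", "v"), (getitem, "k"), (delitem, "k"), (getitem, "k")]

reference: dict = {}
candidate: dict = {}  # swap in the custom HashMap here
for fun, *args in operations:
    assert run(fun, candidate, *args) == run(fun, reference, *args)
assert candidate == reference == {}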
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : int ) -> int: """simple docstring""" _lowerCAmelCase = [1] _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0, 0, 0 _lowerCAmelCase = ugly_nums[ia] * 2 _lowerCAmelCase = ugly_nums[ia] * 3 _lowerCAmelCase = ugly_nums[ia] * 5 for _ in range(1 , snake_case_ ): _lowerCAmelCase = min(snake_case_ , snake_case_ , snake_case_ ) ugly_nums.append(snake_case_ ) if next_num == next_a: ia += 1 _lowerCAmelCase = ugly_nums[ia] * 2 if next_num == next_a: ia += 1 _lowerCAmelCase = ugly_nums[ia] * 3 if next_num == next_a: ia += 1 _lowerCAmelCase = ugly_nums[ia] * 5 return ugly_nums[-1] if __name__ == "__main__": from doctest import testmod testmod(verbose=True) print(F'{ugly_numbers(2_0_0) = }')
317
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int: """simple docstring""" def count_of_possible_combinations(snake_case_ : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(snake_case_ ) def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int: """simple docstring""" def count_of_possible_combinations_with_dp_array( snake_case_ : int , snake_case_ : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] _lowerCAmelCase = sum( count_of_possible_combinations_with_dp_array(target - item , snake_case_ ) for item in array ) _lowerCAmelCase = answer return answer _lowerCAmelCase = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(snake_case_ , snake_case_ ) def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int: """simple docstring""" _lowerCAmelCase = [0] * (target + 1) _lowerCAmelCase = 1 for i in range(1 , target + 1 ): for j in range(snake_case_ ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE : Tuple = 3 SCREAMING_SNAKE_CASE : Any = 5 SCREAMING_SNAKE_CASE : Optional[int] = [1, 2, 5] print(combination_sum_iv(n, array, target))
317
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available SCREAMING_SNAKE_CASE : int = { '''configuration_mobilenet_v2''': [ '''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileNetV2Config''', '''MobileNetV2OnnxConfig''', ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : List[str] = ['''MobileNetV2FeatureExtractor'''] SCREAMING_SNAKE_CASE : List[str] = ['''MobileNetV2ImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Any = [ '''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MobileNetV2ForImageClassification''', '''MobileNetV2ForSemanticSegmentation''', '''MobileNetV2Model''', '''MobileNetV2PreTrainedModel''', '''load_tf_weights_in_mobilenet_v2''', ] if TYPE_CHECKING: from .configuration_mobilenet_va import ( MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileNetVaConfig, MobileNetVaOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor from .image_processing_mobilenet_va import MobileNetVaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilenet_va import ( MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel, MobileNetVaPreTrainedModel, load_tf_weights_in_mobilenet_va, ) else: import sys SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
317
"""simple docstring""" from __future__ import annotations import string from itertools import cycle, product from pathlib import Path SCREAMING_SNAKE_CASE : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) SCREAMING_SNAKE_CASE : list[int] = [ord(letter) for letter in string.ascii_lowercase] SCREAMING_SNAKE_CASE : set[int] = {ord(char) for char in VALID_CHARS} SCREAMING_SNAKE_CASE : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def __UpperCAmelCase ( snake_case_ : list[int] , snake_case_ : tuple[int, ...] ) -> str | None: """simple docstring""" _lowerCAmelCase = "" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 for keychar, cipherchar in zip(cycle(snake_case_ ) , snake_case_ ): _lowerCAmelCase = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(snake_case_ ) return decoded def __UpperCAmelCase ( snake_case_ : list[int] ) -> list[str]: """simple docstring""" _lowerCAmelCase = [] for key in product(snake_case_ , repeat=3 ): _lowerCAmelCase = try_key(snake_case_ , snake_case_ ) if encoded is not None: possibles.append(snake_case_ ) return possibles def __UpperCAmelCase ( snake_case_ : list[str] , snake_case_ : str ) -> list[str]: """simple docstring""" return [possible for possible in possibles if common_word in possible.lower()] def __UpperCAmelCase ( snake_case_ : str = "p059_cipher.txt" ) -> int: """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = Path(snake_case_ ).parent.joinpath(snake_case_ ).read_text(encoding="""utf-8""" ) _lowerCAmelCase = [int(snake_case_ ) for number in data.strip().split(""",""" )] _lowerCAmelCase = filter_valid_chars(snake_case_ ) for common_word in COMMON_WORDS: _lowerCAmelCase = filter_common_word(snake_case_ , snake_case_ ) if len(snake_case_ ) == 1: break _lowerCAmelCase = possibles[0] return sum(ord(snake_case_ ) for char in decoded_text ) if __name__ == "__main__": print(F'{solution() = }')
317
1
"""simple docstring""" import importlib import torch import yaml from omegaconf import OmegaConf from taming.models.vqgan import VQModel def __UpperCAmelCase ( snake_case_ : Optional[int] , snake_case_ : Any=False ) -> str: """simple docstring""" _lowerCAmelCase = OmegaConf.load(snake_case_ ) if display: print(yaml.dump(OmegaConf.to_container(snake_case_ ) ) ) return config def __UpperCAmelCase ( snake_case_ : Any , snake_case_ : int=None , snake_case_ : List[str]=None ) -> Tuple: """simple docstring""" if conf_path is None: _lowerCAmelCase = """./model_checkpoints/vqgan_only.yaml""" _lowerCAmelCase = load_config(snake_case_ , display=snake_case_ ) _lowerCAmelCase = VQModel(**config.model.params ) if ckpt_path is None: _lowerCAmelCase = """./model_checkpoints/vqgan_only.pt""" _lowerCAmelCase = torch.load(snake_case_ , map_location=snake_case_ ) if ".ckpt" in ckpt_path: _lowerCAmelCase = sd["""state_dict"""] model.load_state_dict(snake_case_ , strict=snake_case_ ) model.to(snake_case_ ) del sd return model def __UpperCAmelCase ( snake_case_ : List[Any] , snake_case_ : Tuple ) -> Union[str, Any]: """simple docstring""" _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = model.encode(snake_case_ ) print(F"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" ) _lowerCAmelCase = model.decode(snake_case_ ) return xrec def __UpperCAmelCase ( snake_case_ : List[Any] , snake_case_ : Any=False ) -> str: """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = string.rsplit(""".""" , 1 ) if reload: _lowerCAmelCase = importlib.import_module(snake_case_ ) importlib.reload(snake_case_ ) return getattr(importlib.import_module(snake_case_ , package=snake_case_ ) , cls ) def __UpperCAmelCase ( snake_case_ : List[str] ) -> List[Any]: """simple docstring""" if "target" not in config: raise KeyError("""Expected key `target` to instantiate.""" ) return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) ) def __UpperCAmelCase ( snake_case_ : int , snake_case_ : Union[str, Any] , snake_case_ : str=True , snake_case_ : Union[str, Any]=True ) -> Optional[Any]: """simple docstring""" _lowerCAmelCase = instantiate_from_config(snake_case_ ) if sd is not None: model.load_state_dict(snake_case_ ) if gpu: model.cuda() if eval_mode: model.eval() return {"model": model} def __UpperCAmelCase ( snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : Any ) -> str: """simple docstring""" if ckpt: _lowerCAmelCase = torch.load(snake_case_ , map_location="""cpu""" ) _lowerCAmelCase = pl_sd["""global_step"""] print(F"""loaded model from global step {global_step}.""" ) else: _lowerCAmelCase = {"""state_dict""": None} _lowerCAmelCase = None _lowerCAmelCase = load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=snake_case_ , eval_mode=snake_case_ )["""model"""] return model, global_step
317
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : int = 1000000 ) -> int: """simple docstring""" _lowerCAmelCase = limit + 1 _lowerCAmelCase = [0] * limit for first_term in range(1 , snake_case_ ): for n in range(snake_case_ , snake_case_ , snake_case_ ): _lowerCAmelCase = first_term + n / first_term if common_difference % 4: # d must be divisble by 4 continue else: common_difference /= 4 if ( first_term > common_difference and first_term < 4 * common_difference ): # since x,y,z are positive integers frequency[n] += 1 # so z>0 and a>d ,also 4d<a _lowerCAmelCase = sum(1 for x in frequency[1:limit] if x == 10 ) return count if __name__ == "__main__": print(F'{solution() = }')
317
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig class __lowerCamelCase ( __lowercase ): __UpperCamelCase = 'bert-generation' def __init__(self , lowerCamelCase=50_358 , lowerCamelCase=1_024 , lowerCamelCase=24 , lowerCamelCase=16 , lowerCamelCase=4_096 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=0.02 , lowerCamelCase=1e-12 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase=1 , lowerCamelCase="absolute" , lowerCamelCase=True , **lowerCamelCase , ): '''simple docstring''' super().__init__(pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase ) _lowerCAmelCase = vocab_size _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = hidden_act _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = initializer_range _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = position_embedding_type _lowerCAmelCase = use_cache
317
"""simple docstring""" from functools import reduce SCREAMING_SNAKE_CASE : int = ( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715693290963295227443043557''' '''66896648950445244523161731856403098711121722383113''' '''62229893423380308135336276614282806444486645238749''' '''30358907296290491560440772390713810515859307960866''' '''70172427121883998797908792274921901699720888093776''' '''65727333001053367881220235421809751254540594752243''' '''52584907711670556013604839586446706324415722155397''' '''53697817977846174064955149290862569321978468622482''' '''83972241375657056057490261407972968652414535100474''' '''82166370484403199890008895243450658541227588666881''' '''16427171479924442928230863465674813919123162824586''' '''17866458359124566529476545682848912883142607690042''' '''24219022671055626321111109370544217506941658960408''' '''07198403850962455444362981230987879927244284909188''' '''84580156166097919133875499200524063689912560717606''' '''05886116467109405077541002256983155200055935729725''' '''71636269561882670428252483600823257530420752963450''' ) def __UpperCAmelCase ( snake_case_ : str = N ) -> int: """simple docstring""" return max( # mypy cannot properly interpret reduce int(reduce(lambda snake_case_ , snake_case_ : str(int(snake_case_ ) * int(snake_case_ ) ) , n[i : i + 13] ) ) for i in range(len(snake_case_ ) - 12 ) ) if __name__ == "__main__": print(F'{solution() = }')
317
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Optional[int] = { '''microsoft/beit-base-patch16-224-pt22k''': ( '''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json''' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class __lowerCamelCase ( __lowercase ): __UpperCamelCase = 'beit' def __init__(self , lowerCamelCase=8_192 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3_072 , lowerCamelCase="gelu" , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.02 , lowerCamelCase=1e-12 , lowerCamelCase=224 , lowerCamelCase=16 , lowerCamelCase=3 , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=True , lowerCamelCase=[3, 5, 7, 11] , lowerCamelCase=[1, 2, 3, 6] , lowerCamelCase=True , lowerCamelCase=0.4 , lowerCamelCase=256 , lowerCamelCase=1 , lowerCamelCase=False , lowerCamelCase=255 , **lowerCamelCase , ): '''simple docstring''' super().__init__(**lowerCamelCase ) _lowerCAmelCase = vocab_size _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = initializer_range _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = image_size _lowerCAmelCase = patch_size _lowerCAmelCase = num_channels _lowerCAmelCase = use_mask_token _lowerCAmelCase = use_absolute_position_embeddings _lowerCAmelCase = use_relative_position_bias _lowerCAmelCase = use_shared_relative_position_bias _lowerCAmelCase = layer_scale_init_value _lowerCAmelCase = drop_path_rate _lowerCAmelCase = use_mean_pooling # decode head attributes (semantic segmentation) _lowerCAmelCase = out_indices _lowerCAmelCase = pool_scales # auxiliary head attributes (semantic segmentation) _lowerCAmelCase = use_auxiliary_head _lowerCAmelCase = auxiliary_loss_weight _lowerCAmelCase = auxiliary_channels _lowerCAmelCase = auxiliary_num_convs _lowerCAmelCase = auxiliary_concat_input _lowerCAmelCase = semantic_loss_ignore_index class __lowerCamelCase ( __lowercase ): __UpperCamelCase = version.parse('1.11' ) @property def A__ (self ): '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def A__ (self ): '''simple docstring''' return 1e-4
317
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : int = 600851475143 ) -> int: """simple docstring""" try: _lowerCAmelCase = int(snake_case_ ) except (TypeError, ValueError): raise TypeError("""Parameter n must be int or castable to int.""" ) if n <= 0: raise ValueError("""Parameter n must be greater than or equal to one.""" ) _lowerCAmelCase = 1 _lowerCAmelCase = 2 while i * i <= n: while n % i == 0: _lowerCAmelCase = i n //= i i += 1 if n > 1: _lowerCAmelCase = n return int(snake_case_ ) if __name__ == "__main__": print(F'{solution() = }')
317
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE : List[str] = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Optional[Any] = ['''PLBartTokenizer'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Tuple = [ '''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PLBartForCausalLM''', '''PLBartForConditionalGeneration''', '''PLBartForSequenceClassification''', '''PLBartModel''', '''PLBartPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
317
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__) @dataclass class __lowerCamelCase : __UpperCamelCase = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) __UpperCamelCase = field( default=__lowercase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) __UpperCamelCase = field( default=__lowercase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) __UpperCamelCase = field( default=__lowercase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) __UpperCamelCase = field(default=__lowercase , metadata={'help': 'Whether tp freeze the encoder.'} ) __UpperCamelCase = field(default=__lowercase , metadata={'help': 'Whether to freeze the embeddings.'} ) @dataclass class __lowerCamelCase : __UpperCamelCase = field( metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} ) __UpperCamelCase = field( default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , ) __UpperCamelCase = field( default=1_024 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __UpperCamelCase = field( default=128 , metadata={ 'help': ( 'The maximum total sequence length for target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __UpperCamelCase = field( default=142 , metadata={ 'help': ( 'The maximum total sequence length for validation target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded. ' 'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used ' 'during ``evaluate`` and ``predict``.' ) } , ) __UpperCamelCase = field( default=142 , metadata={ 'help': ( 'The maximum total sequence length for test target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __UpperCamelCase = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} ) __UpperCamelCase = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} ) __UpperCamelCase = field(default=-1 , metadata={'help': '# test examples. 
-1 means use all.'} ) __UpperCamelCase = field(default=__lowercase , metadata={'help': 'Source language id for translation.'} ) __UpperCamelCase = field(default=__lowercase , metadata={'help': 'Target language id for translation.'} ) __UpperCamelCase = field(default=__lowercase , metadata={'help': '# num_beams to use for evaluation.'} ) __UpperCamelCase = field( default=__lowercase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , ) def __UpperCAmelCase ( snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Union[str, Any] ) -> Tuple: """simple docstring""" logger.info(F"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(F""" {key} = {metrics[key]}""" ) save_json(snake_case_ , os.path.join(snake_case_ , F"""{split}_results.json""" ) ) def __UpperCAmelCase ( ) -> Union[str, Any]: """simple docstring""" _lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_args_into_dataclasses() check_output_dir(snake_case_ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("""Training/evaluation parameters %s""" , snake_case_ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_lowerCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _lowerCAmelCase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""") for p in extra_model_params: if getattr(snake_case_ , snake_case_ , snake_case_ ): assert hasattr(snake_case_ , snake_case_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) ) _lowerCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(snake_case_ , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: _lowerCAmelCase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(snake_case_ , snake_case_ ): _lowerCAmelCase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: _lowerCAmelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(snake_case_ ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) _lowerCAmelCase = SeqaSeqDataset # Get datasets _lowerCAmelCase = ( dataset_class( snake_case_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_train else None ) _lowerCAmelCase = ( dataset_class( snake_case_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) _lowerCAmelCase = ( dataset_class( snake_case_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_predict else None ) # Initialize our Trainer _lowerCAmelCase = ( build_compute_metrics_fn(data_args.task , snake_case_ ) if training_args.predict_with_generate else None ) _lowerCAmelCase = SeqaSeqTrainer( model=snake_case_ , args=snake_case_ , data_args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , data_collator=SeqaSeqDataCollator( snake_case_ , snake_case_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case_ , tokenizer=snake_case_ , ) _lowerCAmelCase = {} # Training if training_args.do_train: logger.info("""*** Train ***""" ) _lowerCAmelCase = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) _lowerCAmelCase = train_result.metrics _lowerCAmelCase = data_args.n_train 
trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("""train""" , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) _lowerCAmelCase = trainer.evaluate(metric_key_prefix="""val""" ) _lowerCAmelCase = data_args.n_val _lowerCAmelCase = round(metrics["""val_loss"""] , 4 ) if trainer.is_world_process_zero(): handle_metrics("""val""" , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) if training_args.do_predict: logger.info("""*** Predict ***""" ) _lowerCAmelCase = trainer.predict(test_dataset=snake_case_ , metric_key_prefix="""test""" ) _lowerCAmelCase = test_output.metrics _lowerCAmelCase = data_args.n_test if trainer.is_world_process_zero(): _lowerCAmelCase = round(metrics["""test_loss"""] , 4 ) handle_metrics("""test""" , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) if training_args.predict_with_generate: _lowerCAmelCase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ ) _lowerCAmelCase = lmap(str.strip , snake_case_ ) write_txt_file(snake_case_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) ) if trainer.is_world_process_zero(): save_json(snake_case_ , os.path.join(training_args.output_dir , """all_results.json""" ) ) return all_metrics def __UpperCAmelCase ( snake_case_ : Any ) -> Dict: """simple docstring""" main() if __name__ == "__main__": main()
317
1
"""simple docstring""" import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class __lowerCamelCase ( __lowercase , unittest.TestCase ): __UpperCamelCase = XGLMTokenizer __UpperCamelCase = XGLMTokenizerFast __UpperCamelCase = True __UpperCamelCase = True def A__ (self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _lowerCAmelCase = XGLMTokenizer(lowerCamelCase , keep_accents=lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = """<pad>""" _lowerCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) , lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) , lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(len(lowerCamelCase ) , 1_008 ) def A__ (self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_008 ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = XGLMTokenizer(lowerCamelCase , keep_accents=lowerCamelCase ) _lowerCAmelCase = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowerCamelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) _lowerCAmelCase = tokenizer.convert_tokens_to_ids(lowerCamelCase ) self.assertListEqual( lowerCamelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _lowerCAmelCase = tokenizer.convert_ids_to_tokens(lowerCamelCase ) self.assertListEqual( lowerCamelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def A__ (self ): '''simple docstring''' return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) def A__ (self ): '''simple docstring''' with tempfile.NamedTemporaryFile() as f: shutil.copyfile(lowerCamelCase , f.name ) _lowerCAmelCase = XGLMTokenizer(f.name , keep_accents=lowerCamelCase ) _lowerCAmelCase = pickle.dumps(lowerCamelCase ) 
pickle.loads(lowerCamelCase ) def A__ (self ): '''simple docstring''' if not self.test_rust_tokenizer: return _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = self.get_rust_tokenizer() _lowerCAmelCase = """I was born in 92000, and this is falsé.""" _lowerCAmelCase = tokenizer.tokenize(lowerCamelCase ) _lowerCAmelCase = rust_tokenizer.tokenize(lowerCamelCase ) self.assertListEqual(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) _lowerCAmelCase = rust_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) self.assertListEqual(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = self.get_rust_tokenizer() _lowerCAmelCase = tokenizer.encode(lowerCamelCase ) _lowerCAmelCase = rust_tokenizer.encode(lowerCamelCase ) self.assertListEqual(lowerCamelCase , lowerCamelCase ) @slow def A__ (self ): '''simple docstring''' _lowerCAmelCase = """Hello World!""" _lowerCAmelCase = [2, 31_227, 4_447, 35] self.assertListEqual(lowerCamelCase , self.big_tokenizer.encode(lowerCamelCase ) ) @slow def A__ (self ): '''simple docstring''' _lowerCAmelCase = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth""" ) # fmt: off _lowerCAmelCase = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735] # fmt: on self.assertListEqual(lowerCamelCase , self.big_tokenizer.encode(lowerCamelCase ) ) @slow def A__ (self ): '''simple docstring''' _lowerCAmelCase = { """input_ids""": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase , model_name="""facebook/xglm-564M""" , padding=lowerCamelCase , )
317
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE : List[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Union[str, Any] = [ '''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FocalNetForImageClassification''', '''FocalNetForMaskedImageModeling''', '''FocalNetBackbone''', '''FocalNetModel''', '''FocalNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
317
1
"""simple docstring""" from collections.abc import Generator from math import sin def __UpperCAmelCase ( snake_case_ : bytes ) -> bytes: """simple docstring""" if len(snake_case_ ) != 32: raise ValueError("""Input must be of length 32""" ) _lowerCAmelCase = B"""""" for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def __UpperCAmelCase ( snake_case_ : int ) -> bytes: """simple docstring""" if i < 0: raise ValueError("""Input must be non-negative""" ) _lowerCAmelCase = format(snake_case_ , """08x""" )[-8:] _lowerCAmelCase = B"""""" for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("""utf-8""" ) return little_endian_hex def __UpperCAmelCase ( snake_case_ : bytes ) -> bytes: """simple docstring""" _lowerCAmelCase = B"""""" for char in message: bit_string += format(snake_case_ , """08b""" ).encode("""utf-8""" ) _lowerCAmelCase = format(len(snake_case_ ) , """064b""" ).encode("""utf-8""" ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(snake_case_ ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def __UpperCAmelCase ( snake_case_ : bytes ) -> Generator[list[int], None, None]: """simple docstring""" if len(snake_case_ ) % 512 != 0: raise ValueError("""Input must have length that's a multiple of 512""" ) for pos in range(0 , len(snake_case_ ) , 512 ): _lowerCAmelCase = bit_string[pos : pos + 512] _lowerCAmelCase = [] for i in range(0 , 512 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def __UpperCAmelCase ( snake_case_ : int ) -> int: """simple docstring""" if i < 0: raise ValueError("""Input must be non-negative""" ) _lowerCAmelCase = format(snake_case_ , """032b""" ) _lowerCAmelCase = """""" for c in i_str: new_str += "1" if c == "0" else "0" return int(snake_case_ , 2 ) def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int ) -> int: """simple docstring""" return (a + b) % 2**32 def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int ) -> int: """simple docstring""" if i < 0: raise ValueError("""Input must be non-negative""" ) if shift < 0: raise ValueError("""Shift must be non-negative""" ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def __UpperCAmelCase ( snake_case_ : bytes ) -> bytes: """simple docstring""" _lowerCAmelCase = preprocess(snake_case_ ) _lowerCAmelCase = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states _lowerCAmelCase = 0X67_452_301 _lowerCAmelCase = 0Xef_cda_b89 _lowerCAmelCase = 0X98_bad_cfe _lowerCAmelCase = 0X10_325_476 _lowerCAmelCase = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(snake_case_ ): _lowerCAmelCase = aa _lowerCAmelCase = ba _lowerCAmelCase = ca _lowerCAmelCase = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f _lowerCAmelCase = d ^ (b & (c ^ d)) _lowerCAmelCase = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f _lowerCAmelCase = c ^ (d & (b ^ c)) _lowerCAmelCase = (5 * i + 1) % 16 elif i <= 47: _lowerCAmelCase = b ^ c ^ d _lowerCAmelCase = (3 * i + 5) % 16 else: _lowerCAmelCase = c ^ (b | not_aa(snake_case_ )) 
_lowerCAmelCase = (7 * i) % 16 _lowerCAmelCase = (f + a + added_consts[i] + block_words[g]) % 2**32 _lowerCAmelCase = d _lowerCAmelCase = c _lowerCAmelCase = b _lowerCAmelCase = sum_aa(snake_case_ , left_rotate_aa(snake_case_ , shift_amounts[i] ) ) # Add hashed chunk to running total _lowerCAmelCase = sum_aa(snake_case_ , snake_case_ ) _lowerCAmelCase = sum_aa(snake_case_ , snake_case_ ) _lowerCAmelCase = sum_aa(snake_case_ , snake_case_ ) _lowerCAmelCase = sum_aa(snake_case_ , snake_case_ ) _lowerCAmelCase = reformat_hex(snake_case_ ) + reformat_hex(snake_case_ ) + reformat_hex(snake_case_ ) + reformat_hex(snake_case_ ) return digest if __name__ == "__main__": import doctest doctest.testmod()
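The digest routine above can be sanity-checked against the standard library. A sketch assuming the file is saved as md5.py and the final digest function is exposed as md5_me (both names are masked in this listing, so they are assumptions):

import hashlib

from md5 import md5_me  # hypothetical module and function names

# The pure-Python implementation returns the hex digest as bytes,
# so compare against hashlib's hexdigest encoded to bytes.
message = b"The quick brown fox jumps over the lazy dog"
assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")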
317
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __lowerCamelCase ( unittest.TestCase ): def __init__(self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=18 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=None , ): '''simple docstring''' _lowerCAmelCase = size if size is not None else {"""shortest_edge""": 20} _lowerCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = num_channels _lowerCAmelCase = image_size _lowerCAmelCase = min_resolution _lowerCAmelCase = max_resolution _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = do_center_crop _lowerCAmelCase = crop_size def A__ (self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class __lowerCamelCase ( __lowercase , unittest.TestCase ): __UpperCamelCase = MobileNetVaImageProcessor if is_vision_available() else None def A__ (self ): '''simple docstring''' _lowerCAmelCase = MobileNetVaImageProcessingTester(self ) @property def A__ (self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase , """do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase , """size""" ) ) self.assertTrue(hasattr(lowerCamelCase , """do_center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase , """crop_size""" ) ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 20} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def A__ (self ): '''simple docstring''' pass def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , Image.Image ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, 
self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , np.ndarray ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , torch.Tensor ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
317
1
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : int = 600851475143 ) -> int: """simple docstring""" try: _lowerCAmelCase = int(snake_case_ ) except (TypeError, ValueError): raise TypeError("""Parameter n must be int or castable to int.""" ) if n <= 0: raise ValueError("""Parameter n must be greater than or equal to one.""" ) _lowerCAmelCase = 2 _lowerCAmelCase = 0 if n == 2: return 2 while n > 2: while n % i != 0: i += 1 _lowerCAmelCase = i while n % i == 0: _lowerCAmelCase = n // i i += 1 return int(snake_case_ ) if __name__ == "__main__": print(F'{solution() = }')
317
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : list ) -> list: """simple docstring""" for i in range(len(snake_case_ ) - 1 , 0 , -1 ): _lowerCAmelCase = False for j in range(snake_case_ , 0 , -1 ): if unsorted[j] < unsorted[j - 1]: _lowerCAmelCase , _lowerCAmelCase = unsorted[j - 1], unsorted[j] _lowerCAmelCase = True for j in range(snake_case_ ): if unsorted[j] > unsorted[j + 1]: _lowerCAmelCase , _lowerCAmelCase = unsorted[j + 1], unsorted[j] _lowerCAmelCase = True if not swapped: break return unsorted if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE : List[Any] = input('''Enter numbers separated by a comma:\n''').strip() SCREAMING_SNAKE_CASE : List[str] = [int(item) for item in user_input.split(''',''')] print(F'{cocktail_shaker_sort(unsorted) = }')
317
1
"""simple docstring""" import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration SCREAMING_SNAKE_CASE : Tuple = pytest.mark.integration SCREAMING_SNAKE_CASE : Any = {'''comet'''} SCREAMING_SNAKE_CASE : Union[str, Any] = importlib.util.find_spec('''fairseq''') is not None SCREAMING_SNAKE_CASE : str = {'''code_eval'''} SCREAMING_SNAKE_CASE : Dict = os.name == '''nt''' SCREAMING_SNAKE_CASE : str = {'''bertscore''', '''frugalscore''', '''perplexity'''} SCREAMING_SNAKE_CASE : Optional[int] = importlib.util.find_spec('''transformers''') is not None def __UpperCAmelCase ( snake_case_ : List[str] ) -> int: """simple docstring""" @wraps(snake_case_ ) def wrapper(self : Optional[Any] , snake_case_ : Tuple ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest("""\"test requires Fairseq\"""" ) else: test_case(self , snake_case_ ) return wrapper def __UpperCAmelCase ( snake_case_ : List[Any] ) -> List[Any]: """simple docstring""" @wraps(snake_case_ ) def wrapper(self : Any , snake_case_ : int ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest("""\"test requires transformers\"""" ) else: test_case(self , snake_case_ ) return wrapper def __UpperCAmelCase ( snake_case_ : Optional[int] ) -> Dict: """simple docstring""" @wraps(snake_case_ ) def wrapper(self : Dict , snake_case_ : Union[str, Any] ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest("""\"test not supported on Windows\"""" ) else: test_case(self , snake_case_ ) return wrapper def __UpperCAmelCase ( ) -> Any: """simple docstring""" _lowerCAmelCase = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("""./metrics/*/""" )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( __lowercase , __lowercase , __lowercase ) @local class __lowerCamelCase ( parameterized.TestCase ): __UpperCamelCase = {} __UpperCamelCase = None @pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" ) @pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""" ) def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = """[...]""" _lowerCAmelCase = importlib.import_module( datasets.load.metric_module_factory(os.path.join("""metrics""" , lowerCamelCase ) ).module_path ) _lowerCAmelCase = datasets.load.import_main_class(metric_module.__name__ , dataset=lowerCamelCase ) # check parameters _lowerCAmelCase = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(lowerCamelCase , metric_module.__name__ ): with self.use_local_metrics(): try: _lowerCAmelCase = doctest.testmod(lowerCamelCase , verbose=lowerCamelCase , raise_on_error=lowerCamelCase ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = """[...]""" _lowerCAmelCase = 
importlib.import_module( datasets.load.metric_module_factory(os.path.join("""metrics""" , lowerCamelCase ) ).module_path ) # run doctest with self.use_local_metrics(): _lowerCAmelCase = doctest.testmod(lowerCamelCase , verbose=lowerCamelCase , raise_on_error=lowerCamelCase ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def A__ (self , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](lowerCamelCase ): yield else: yield @contextmanager def A__ (self ): '''simple docstring''' def load_local_metric(lowerCamelCase , *lowerCamelCase , **lowerCamelCase ): return load_metric(os.path.join("""metrics""" , lowerCamelCase ) , *lowerCamelCase , **lowerCamelCase ) with patch("""datasets.load_metric""" ) as mock_load_metric: _lowerCAmelCase = load_local_metric yield @classmethod def A__ (cls , lowerCamelCase ): '''simple docstring''' def wrapper(lowerCamelCase ): _lowerCAmelCase = contextmanager(lowerCamelCase ) _lowerCAmelCase = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher("""bleurt""" ) def __UpperCAmelCase ( snake_case_ : List[Any] ) -> Any: """simple docstring""" import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string("""sv""" , """""" , """""" ) # handle pytest cli flags class __lowerCamelCase ( __lowercase ): def A__ (self , lowerCamelCase ): '''simple docstring''' assert len(input_dict["""input_ids"""] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor: _lowerCAmelCase = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher("""bertscore""" ) def __UpperCAmelCase ( snake_case_ : List[Any] ) -> Any: """simple docstring""" import torch def bert_cos_score_idf(snake_case_ : Dict , snake_case_ : Optional[Any] , *snake_case_ : Optional[int] , **snake_case_ : Optional[Any] ): return torch.tensor([[1.0, 1.0, 1.0]] * len(snake_case_ ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch("""bert_score.scorer.get_model""" ), patch( """bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf: _lowerCAmelCase = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher("""comet""" ) def __UpperCAmelCase ( snake_case_ : List[str] ) -> List[Any]: """simple docstring""" def load_from_checkpoint(snake_case_ : int ): class __lowerCamelCase : def A__ (self , lowerCamelCase , *lowerCamelCase , **lowerCamelCase ): '''simple docstring''' assert len(lowerCamelCase ) == 2 _lowerCAmelCase = [0.19, 0.92] return scores, sum(lowerCamelCase ) / len(lowerCamelCase ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch("""comet.download_model""" ) as mock_download_model: _lowerCAmelCase = None with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint: _lowerCAmelCase = load_from_checkpoint yield def __UpperCAmelCase ( ) -> List[Any]: """simple docstring""" _lowerCAmelCase = load_metric(os.path.join("""metrics""" , """seqeval""" ) ) _lowerCAmelCase = """ERROR""" _lowerCAmelCase = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}""" with 
pytest.raises(snake_case_ , match=re.escape(snake_case_ ) ): metric.compute(predictions=[] , references=[] , scheme=snake_case_ )
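The key trick in the test file above is swapping each metric's heavyweight model call for a cheap stub via unittest.mock.patch wrapped in a contextmanager, so the doctests run without downloads or forward passes. A self-contained miniature of the same pattern:

import random
from contextlib import contextmanager
from unittest.mock import patch


@contextmanager
def deterministic_random():
    """Replace random.random with a stub for the duration of the block,
    the same way the registered patchers above replace model calls."""
    with patch("random.random", new=lambda: 0.5):
        yield


with deterministic_random():
    assert random.random() == 0.5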
317
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) def __UpperCAmelCase ( snake_case_ : bool , snake_case_ : bool ) -> Tuple: """simple docstring""" def run_func(snake_case_ : Union[str, Any] ): @wraps(snake_case_ ) def run_in_eager_mode(*snake_case_ : Optional[int] , **snake_case_ : Union[str, Any] ): return func(*snake_case_ , **snake_case_ ) @wraps(snake_case_ ) @tf.function(experimental_compile=snake_case_ ) def run_in_graph_mode(*snake_case_ : Dict , **snake_case_ : Union[str, Any] ): return func(*snake_case_ , **snake_case_ ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> ["tf.Tensor"]: """simple docstring""" _lowerCAmelCase = random.Random() _lowerCAmelCase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(snake_case_ , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class __lowerCamelCase ( __lowercase ): __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = "TensorFlow" @property def A__ (self ): '''simple docstring''' return tf.__version__ def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_speed(_inference ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_speed(_train ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase ) _lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_memory(_inference ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase ) _lowerCAmelCase = self.args.strategy if strategy is None: raise 
ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_memory(_train ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _lowerCAmelCase = ( hasattr(lowerCamelCase , """architectures""" ) and isinstance(config.architectures , lowerCamelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = model_cls(lowerCamelCase ) except ImportError: raise ImportError( f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to""" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _lowerCAmelCase = TF_MODEL_MAPPING[config.__class__](lowerCamelCase ) # encoder-decoder has vocab size saved differently _lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size _lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(lowerCamelCase , decoder_input_ids=lowerCamelCase , training=lowerCamelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(lowerCamelCase , training=lowerCamelCase ) _lowerCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _lowerCAmelCase = ( hasattr(lowerCamelCase , """architectures""" ) and isinstance(config.architectures , lowerCamelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = model_cls(lowerCamelCase ) except ImportError: raise ImportError( f"""{model_class} does not exist. 
If you just want to test the pretrained model, you might want to""" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _lowerCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowerCamelCase ) # encoder-decoder has vocab size saved differently _lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size _lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): _lowerCAmelCase = model(lowerCamelCase , decoder_input_ids=lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0] _lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): _lowerCAmelCase = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0] _lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables ) return gradients _lowerCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def A__ (self , lowerCamelCase ): '''simple docstring''' with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(lowerCamelCase , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _lowerCAmelCase = timeit.repeat( lowerCamelCase , repeat=self.args.repeat , number=10 , ) return min(lowerCamelCase ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f"""Doesn't fit on GPU. {e}""" ) def A__ (self , lowerCamelCase ): '''simple docstring''' logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _lowerCAmelCase = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _lowerCAmelCase = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _lowerCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _lowerCAmelCase = nvml.nvmlDeviceGetMemoryInfo(lowerCamelCase ) _lowerCAmelCase = meminfo.used _lowerCAmelCase = Memory(lowerCamelCase ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _lowerCAmelCase = None else: _lowerCAmelCase = measure_peak_memory_cpu(lowerCamelCase ) _lowerCAmelCase = Memory(lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else memory_bytes if self.args.trace_memory_line_by_line: _lowerCAmelCase = stop_memory_tracing(lowerCamelCase ) if memory is None: _lowerCAmelCase = summary.total else: _lowerCAmelCase = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f"""Doesn't fit on GPU. {e}""" ) return "N/A", None
317
1
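A minimal sketch (not part of the dataset) of the eager-versus-graph dispatch that the TensorFlow benchmark sample above builds; it assumes a TensorFlow 2.x version where `tf.function(experimental_compile=...)` is still accepted as the XLA switch (newer releases renamed it `jit_compile`).

```python
import tensorflow as tf

def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    # Returns a decorator that either leaves `func` in eager mode or wraps it
    # in an (optionally XLA-compiled) tf.function, mirroring the sample above.
    def run_func(func):
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode:
            if use_xla:
                raise ValueError("XLA requires graph mode; set do_eager_mode=False.")
            return func
        return run_in_graph_mode

    return run_func

@run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
def square(x):
    return x * x

print(square(tf.constant(3.0)))  # traced, XLA-compiled on first call, then run
```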
"""simple docstring""" import torch from torch import nn class __lowerCamelCase ( nn.Module ): def __init__(self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=1 , lowerCamelCase=False ): '''simple docstring''' super().__init__() _lowerCAmelCase = n_token _lowerCAmelCase = d_embed _lowerCAmelCase = d_proj _lowerCAmelCase = cutoffs + [n_token] _lowerCAmelCase = [0] + self.cutoffs _lowerCAmelCase = div_val _lowerCAmelCase = self.cutoffs[0] _lowerCAmelCase = len(self.cutoffs ) - 1 _lowerCAmelCase = self.shortlist_size + self.n_clusters if self.n_clusters > 0: _lowerCAmelCase = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) ) _lowerCAmelCase = nn.Parameter(torch.zeros(self.n_clusters ) ) _lowerCAmelCase = nn.ModuleList() _lowerCAmelCase = nn.ParameterList() if div_val == 1: for i in range(len(self.cutoffs ) ): if d_proj != d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCamelCase , lowerCamelCase ) ) ) else: self.out_projs.append(lowerCamelCase ) self.out_layers.append(nn.Linear(lowerCamelCase , lowerCamelCase ) ) else: for i in range(len(self.cutoffs ) ): _lowerCAmelCase , _lowerCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1] _lowerCAmelCase = d_embed // (div_val**i) self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCamelCase , lowerCamelCase ) ) ) self.out_layers.append(nn.Linear(lowerCamelCase , r_idx - l_idx ) ) _lowerCAmelCase = keep_order def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if proj is None: _lowerCAmelCase = nn.functional.linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase ) else: # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1: _lowerCAmelCase = nn.functional.linear(lowerCamelCase , proj.t().contiguous() ) _lowerCAmelCase = nn.functional.linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase ) # else: # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) # if bias is not None: # logit = logit + bias return logit def A__ (self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=False ): '''simple docstring''' if labels is not None: # Shift so that tokens < n predict n _lowerCAmelCase = hidden[..., :-1, :].contiguous() _lowerCAmelCase = labels[..., 1:].contiguous() _lowerCAmelCase = hidden.view(-1 , hidden.size(-1 ) ) _lowerCAmelCase = labels.view(-1 ) if hidden.size(0 ) != labels.size(0 ): raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" ) else: _lowerCAmelCase = hidden.view(-1 , hidden.size(-1 ) ) if self.n_clusters == 0: _lowerCAmelCase = self._compute_logit(lowerCamelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) if labels is not None: _lowerCAmelCase = labels != -100 _lowerCAmelCase = torch.zeros_like(lowerCamelCase , dtype=hidden.dtype , device=hidden.device ) _lowerCAmelCase = ( -nn.functional.log_softmax(lowerCamelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 ) ) else: _lowerCAmelCase = nn.functional.log_softmax(lowerCamelCase , dim=-1 ) else: # construct weights and biases _lowerCAmelCase , _lowerCAmelCase = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: _lowerCAmelCase , _lowerCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1] _lowerCAmelCase = self.out_layers[0].weight[l_idx:r_idx] _lowerCAmelCase = self.out_layers[0].bias[l_idx:r_idx] else: _lowerCAmelCase = self.out_layers[i].weight _lowerCAmelCase = self.out_layers[i].bias if i == 0: _lowerCAmelCase = 
torch.cat([weight_i, self.cluster_weight] , dim=0 ) _lowerCAmelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(lowerCamelCase ) biases.append(lowerCamelCase ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = weights[0], biases[0], self.out_projs[0] _lowerCAmelCase = self._compute_logit(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = nn.functional.log_softmax(lowerCamelCase , dim=1 ) if labels is None: _lowerCAmelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) ) else: _lowerCAmelCase = torch.zeros_like(lowerCamelCase , dtype=hidden.dtype , device=hidden.device ) _lowerCAmelCase = 0 _lowerCAmelCase = [0] + self.cutoffs for i in range(len(lowerCamelCase ) - 1 ): _lowerCAmelCase , _lowerCAmelCase = cutoff_values[i], cutoff_values[i + 1] if labels is not None: _lowerCAmelCase = (labels >= l_idx) & (labels < r_idx) _lowerCAmelCase = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue _lowerCAmelCase = labels.index_select(0 , lowerCamelCase ) - l_idx _lowerCAmelCase = head_logprob.index_select(0 , lowerCamelCase ) _lowerCAmelCase = hidden.index_select(0 , lowerCamelCase ) else: _lowerCAmelCase = hidden if i == 0: if labels is not None: _lowerCAmelCase = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 ) else: _lowerCAmelCase = head_logprob[:, : self.cutoffs[0]] else: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = weights[i], biases[i], self.out_projs[i] _lowerCAmelCase = self._compute_logit(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = nn.functional.log_softmax(lowerCamelCase , dim=1 ) _lowerCAmelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster if labels is not None: _lowerCAmelCase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather( 1 , target_i[:, None] ).squeeze(1 ) else: _lowerCAmelCase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i _lowerCAmelCase = logprob_i if labels is not None: if (hasattr(self , """keep_order""" ) and self.keep_order) or keep_order: out.index_copy_(0 , lowerCamelCase , -logprob_i ) else: out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i ) offset += logprob_i.size(0 ) return out def A__ (self , lowerCamelCase ): '''simple docstring''' if self.n_clusters == 0: _lowerCAmelCase = self._compute_logit(lowerCamelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) return nn.functional.log_softmax(lowerCamelCase , dim=-1 ) else: # construct weights and biases _lowerCAmelCase , _lowerCAmelCase = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: _lowerCAmelCase , _lowerCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1] _lowerCAmelCase = self.out_layers[0].weight[l_idx:r_idx] _lowerCAmelCase = self.out_layers[0].bias[l_idx:r_idx] else: _lowerCAmelCase = self.out_layers[i].weight _lowerCAmelCase = self.out_layers[i].bias if i == 0: _lowerCAmelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 ) _lowerCAmelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(lowerCamelCase ) biases.append(lowerCamelCase ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = weights[0], biases[0], self.out_projs[0] _lowerCAmelCase = self._compute_logit(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) ) _lowerCAmelCase = nn.functional.log_softmax(lowerCamelCase , dim=1 ) _lowerCAmelCase = [0] + self.cutoffs for i in range(len(lowerCamelCase ) - 1 ): 
_lowerCAmelCase , _lowerCAmelCase = cutoff_values[i], cutoff_values[i + 1] if i == 0: _lowerCAmelCase = head_logprob[:, : self.cutoffs[0]] else: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = weights[i], biases[i], self.out_projs[i] _lowerCAmelCase = self._compute_logit(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = nn.functional.log_softmax(lowerCamelCase , dim=1 ) _lowerCAmelCase = head_logprob[:, -i] + tail_logprob_i _lowerCAmelCase = logprob_i return out
317
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Any = { '''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''', } class __lowerCamelCase ( __lowercase ): __UpperCamelCase = 'transfo-xl' __UpperCamelCase = ['mems'] __UpperCamelCase = { 'n_token': 'vocab_size', 'hidden_size': 'd_model', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__(self , lowerCamelCase=267_735 , lowerCamelCase=[20_000, 40_000, 200_000] , lowerCamelCase=1_024 , lowerCamelCase=1_024 , lowerCamelCase=16 , lowerCamelCase=64 , lowerCamelCase=4_096 , lowerCamelCase=4 , lowerCamelCase=False , lowerCamelCase=18 , lowerCamelCase=1_600 , lowerCamelCase=1_000 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=0 , lowerCamelCase=-1 , lowerCamelCase=True , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase="normal" , lowerCamelCase=0.01 , lowerCamelCase=0.01 , lowerCamelCase=0.02 , lowerCamelCase=1e-5 , lowerCamelCase=0 , **lowerCamelCase , ): '''simple docstring''' _lowerCAmelCase = vocab_size _lowerCAmelCase = [] self.cutoffs.extend(lowerCamelCase ) if proj_share_all_but_first: _lowerCAmelCase = [False] + [True] * len(self.cutoffs ) else: _lowerCAmelCase = [False] + [False] * len(self.cutoffs ) _lowerCAmelCase = d_model _lowerCAmelCase = d_embed _lowerCAmelCase = d_head _lowerCAmelCase = d_inner _lowerCAmelCase = div_val _lowerCAmelCase = pre_lnorm _lowerCAmelCase = n_layer _lowerCAmelCase = n_head _lowerCAmelCase = mem_len _lowerCAmelCase = same_length _lowerCAmelCase = attn_type _lowerCAmelCase = clamp_len _lowerCAmelCase = sample_softmax _lowerCAmelCase = adaptive _lowerCAmelCase = dropout _lowerCAmelCase = dropatt _lowerCAmelCase = untie_r _lowerCAmelCase = init _lowerCAmelCase = init_range _lowerCAmelCase = proj_init_std _lowerCAmelCase = init_std _lowerCAmelCase = layer_norm_epsilon super().__init__(eos_token_id=lowerCamelCase , **lowerCamelCase ) @property def A__ (self ): '''simple docstring''' logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) return -1 @max_position_embeddings.setter def A__ (self , lowerCamelCase ): '''simple docstring''' raise NotImplementedError( f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
317
1
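For orientation, a tiny illustration of how the adaptive-softmax and config samples above partition the vocabulary: the `cutoffs` list splits the vocabulary into a frequent-token head plus tail clusters. The numbers are the config sample's own defaults; the printout itself is illustrative.

```python
vocab_size = 267_735                  # n_token default in the config sample
cutoffs = [20_000, 40_000, 200_000]   # cutoffs default in the config sample

cutoff_ends = [0] + cutoffs + [vocab_size]
for i in range(len(cutoff_ends) - 1):
    l_idx, r_idx = cutoff_ends[i], cutoff_ends[i + 1]
    print(f"cluster {i}: token ids [{l_idx}, {r_idx}) -> {r_idx - l_idx} rows")
# cluster 0 is the head; with div_val > 1, tail cluster i gets a smaller
# embedding of size d_embed // div_val**i, which is the point of the layout.
```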
"""simple docstring""" import argparse from copy import deepcopy import numpy as np from datasets import ClassLabel, DatasetDict, load_dataset from evaluate import load from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, Trainer, TrainerCallback, TrainingArguments, set_seed, ) def __UpperCAmelCase ( ) -> str: """simple docstring""" _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("""--model_ckpt""" , type=snake_case_ , default="""microsoft/unixcoder-base-nine""" ) parser.add_argument("""--num_epochs""" , type=snake_case_ , default=5 ) parser.add_argument("""--batch_size""" , type=snake_case_ , default=6 ) parser.add_argument("""--gradient_accumulation_steps""" , type=snake_case_ , default=1 ) parser.add_argument("""--freeze""" , type=snake_case_ , default=snake_case_ ) parser.add_argument("""--learning_rate""" , type=snake_case_ , default=5e-4 ) parser.add_argument("""--seed""" , type=snake_case_ , default=0 ) parser.add_argument("""--lr_scheduler_type""" , type=snake_case_ , default="""cosine""" ) parser.add_argument("""--num_warmup_steps""" , type=snake_case_ , default=10 ) parser.add_argument("""--weight_decay""" , type=snake_case_ , default=0.0_1 ) parser.add_argument("""--output_dir""" , type=snake_case_ , default="""./results""" ) return parser.parse_args() SCREAMING_SNAKE_CASE : Optional[int] = load('''accuracy''') def __UpperCAmelCase ( snake_case_ : Optional[Any] ) -> List[Any]: """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = eval_pred _lowerCAmelCase = np.argmax(snake_case_ , axis=1 ) return metric.compute(predictions=snake_case_ , references=snake_case_ ) class __lowerCamelCase ( __lowercase ): def __init__(self , lowerCamelCase ): '''simple docstring''' super().__init__() _lowerCAmelCase = trainer def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ): '''simple docstring''' if control.should_evaluate: _lowerCAmelCase = deepcopy(lowerCamelCase ) self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="""train""" ) return control_copy def __UpperCAmelCase ( ) -> str: """simple docstring""" _lowerCAmelCase = get_args() set_seed(args.seed ) _lowerCAmelCase = load_dataset("""codeparrot/codecomplex""" , split="""train""" ) _lowerCAmelCase = dataset.train_test_split(test_size=0.2 ) _lowerCAmelCase = train_test["""test"""].train_test_split(test_size=0.5 ) _lowerCAmelCase = DatasetDict( { """train""": train_test["""train"""], """test""": test_validation["""train"""], """valid""": test_validation["""test"""], } ) print("""Loading tokenizer and model""" ) _lowerCAmelCase = AutoTokenizer.from_pretrained(args.model_ckpt ) _lowerCAmelCase = tokenizer.eos_token _lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 ) _lowerCAmelCase = model.config.eos_token_id if args.freeze: for param in model.roberta.parameters(): _lowerCAmelCase = False _lowerCAmelCase = ClassLabel(num_classes=7 , names=list(set(train_test_validation["""train"""]["""complexity"""] ) ) ) def tokenize(snake_case_ : List[Any] ): _lowerCAmelCase = tokenizer(example["""src"""] , truncation=snake_case_ , max_length=1024 ) _lowerCAmelCase = labels.straint(example["""complexity"""] ) return { "input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"], "label": label, } _lowerCAmelCase = train_test_validation.map( snake_case_ , batched=snake_case_ , remove_columns=train_test_validation["""train"""].column_names , ) _lowerCAmelCase = 
DataCollatorWithPadding(tokenizer=snake_case_ ) _lowerCAmelCase = TrainingArguments( output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="""epoch""" , save_strategy="""epoch""" , logging_strategy="""epoch""" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.0_1 , metric_for_best_model="""accuracy""" , run_name="""complexity-java""" , report_to="""wandb""" , ) _lowerCAmelCase = Trainer( model=snake_case_ , args=snake_case_ , train_dataset=tokenized_datasets["""train"""] , eval_dataset=tokenized_datasets["""valid"""] , tokenizer=snake_case_ , data_collator=snake_case_ , compute_metrics=snake_case_ , ) print("""Training...""" ) trainer.add_callback(CustomCallback(snake_case_ ) ) trainer.train() if __name__ == "__main__": main()
317
"""simple docstring""" import math def __UpperCAmelCase ( snake_case_ : int ) -> list[int]: """simple docstring""" _lowerCAmelCase = [] _lowerCAmelCase = 2 _lowerCAmelCase = int(math.sqrt(snake_case_ ) ) # Size of every segment _lowerCAmelCase = [True] * (end + 1) _lowerCAmelCase = [] while start <= end: if temp[start] is True: in_prime.append(snake_case_ ) for i in range(start * start , end + 1 , snake_case_ ): _lowerCAmelCase = False start += 1 prime += in_prime _lowerCAmelCase = end + 1 _lowerCAmelCase = min(2 * end , snake_case_ ) while low <= n: _lowerCAmelCase = [True] * (high - low + 1) for each in in_prime: _lowerCAmelCase = math.floor(low / each ) * each if t < low: t += each for j in range(snake_case_ , high + 1 , snake_case_ ): _lowerCAmelCase = False for j in range(len(snake_case_ ) ): if temp[j] is True: prime.append(j + low ) _lowerCAmelCase = high + 1 _lowerCAmelCase = min(high + end , snake_case_ ) return prime print(sieve(1_0**6))
317
1
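A quick sanity check for the segmented-sieve sample above; `sieve` is assumed to be that function under its original (pre-obfuscation) name, and `is_prime` is a naive reference written here only for comparison.

```python
def is_prime(n: int) -> bool:
    # trial division, reference implementation for small n
    return n > 1 and all(n % d != 0 for d in range(2, int(n**0.5) + 1))

assert sieve(100) == [p for p in range(2, 101) if is_prime(p)]
assert len(sieve(10**6)) == 78_498  # pi(10**6), the classic checkpoint value
```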
"""simple docstring""" from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name SCREAMING_SNAKE_CASE : Union[str, Any] = ''' Examples: ```py >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline >>> from diffusers.utils import load_image >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ... ) >>> pipe_prior.to("cuda") >>> prompt = "A red cartoon frog, 4k" >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 ... ) >>> pipe.to("cuda") >>> init_image = load_image( ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" ... "/kandinsky/frog.png" ... ) >>> image = pipe( ... image=init_image, ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... strength=0.2, ... ).images >>> image[0].save("red_frog.png") ``` ''' def __UpperCAmelCase ( snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : List[Any]=8 ) -> Tuple: """simple docstring""" _lowerCAmelCase = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 _lowerCAmelCase = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def __UpperCAmelCase ( snake_case_ : int , snake_case_ : str=512 , snake_case_ : Any=512 ) -> List[str]: """simple docstring""" _lowerCAmelCase = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 ) _lowerCAmelCase = np.array(pil_image.convert("""RGB""" ) ) _lowerCAmelCase = arr.astype(np.floataa ) / 1_2_7.5 - 1 _lowerCAmelCase = np.transpose(snake_case_ , [2, 0, 1] ) _lowerCAmelCase = torch.from_numpy(snake_case_ ).unsqueeze(0 ) return image class __lowerCamelCase ( __lowercase ): def __init__(self , lowerCamelCase , lowerCamelCase , lowerCamelCase , ): '''simple docstring''' super().__init__() self.register_modules( unet=lowerCamelCase , scheduler=lowerCamelCase , movq=lowerCamelCase , ) _lowerCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = min(int(num_inference_steps * strength ) , lowerCamelCase ) _lowerCAmelCase = max(num_inference_steps - init_timestep , 0 ) _lowerCAmelCase = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None ): '''simple docstring''' if not isinstance(lowerCamelCase , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCamelCase )}""" ) _lowerCAmelCase = image.to(device=lowerCamelCase , dtype=lowerCamelCase ) _lowerCAmelCase = batch_size * 
num_images_per_prompt if image.shape[1] == 4: _lowerCAmelCase = image else: if isinstance(lowerCamelCase , lowerCamelCase ) and len(lowerCamelCase ) != batch_size: raise ValueError( f"""You have passed a list of generators of length {len(lowerCamelCase )}, but requested an effective batch""" f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) elif isinstance(lowerCamelCase , lowerCamelCase ): _lowerCAmelCase = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCamelCase ) ] _lowerCAmelCase = torch.cat(lowerCamelCase , dim=0 ) else: _lowerCAmelCase = self.movq.encode(lowerCamelCase ).latent_dist.sample(lowerCamelCase ) _lowerCAmelCase = self.movq.config.scaling_factor * init_latents _lowerCAmelCase = torch.cat([init_latents] , dim=0 ) _lowerCAmelCase = init_latents.shape _lowerCAmelCase = randn_tensor(lowerCamelCase , generator=lowerCamelCase , device=lowerCamelCase , dtype=lowerCamelCase ) # get latents _lowerCAmelCase = self.scheduler.add_noise(lowerCamelCase , lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = init_latents return latents def A__ (self , lowerCamelCase=0 ): '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) _lowerCAmelCase = torch.device(f"""cuda:{gpu_id}""" ) _lowerCAmelCase = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowerCamelCase , lowerCamelCase ) def A__ (self , lowerCamelCase=0 ): '''simple docstring''' if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) _lowerCAmelCase = torch.device(f"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=lowerCamelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) _lowerCAmelCase = None for cpu_offloaded_model in [self.unet, self.movq]: _lowerCAmelCase , _lowerCAmelCase = cpu_offload_with_hook(lowerCamelCase , lowerCamelCase , prev_module_hook=lowerCamelCase ) # We'll offload the last model manually. 
_lowerCAmelCase = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def A__ (self ): '''simple docstring''' if not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(lowerCamelCase , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(lowerCamelCase ) def __call__(self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 512 , lowerCamelCase = 512 , lowerCamelCase = 100 , lowerCamelCase = 4.0 , lowerCamelCase = 0.3 , lowerCamelCase = 1 , lowerCamelCase = None , lowerCamelCase = "pil" , lowerCamelCase = True , ): '''simple docstring''' _lowerCAmelCase = self._execution_device _lowerCAmelCase = guidance_scale > 1.0 if isinstance(lowerCamelCase , lowerCamelCase ): _lowerCAmelCase = torch.cat(lowerCamelCase , dim=0 ) _lowerCAmelCase = image_embeds.shape[0] if isinstance(lowerCamelCase , lowerCamelCase ): _lowerCAmelCase = torch.cat(lowerCamelCase , dim=0 ) if do_classifier_free_guidance: _lowerCAmelCase = image_embeds.repeat_interleave(lowerCamelCase , dim=0 ) _lowerCAmelCase = negative_image_embeds.repeat_interleave(lowerCamelCase , dim=0 ) _lowerCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase ) if not isinstance(lowerCamelCase , lowerCamelCase ): _lowerCAmelCase = [image] if not all(isinstance(lowerCamelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( f"""Input is in incorrect format: {[type(lowerCamelCase ) for i in image]}. 
Currently, we only support PIL image and pytorch tensor""" ) _lowerCAmelCase = torch.cat([prepare_image(lowerCamelCase , lowerCamelCase , lowerCamelCase ) for i in image] , dim=0 ) _lowerCAmelCase = image.to(dtype=image_embeds.dtype , device=lowerCamelCase ) _lowerCAmelCase = self.movq.encode(lowerCamelCase )["""latents"""] _lowerCAmelCase = latents.repeat_interleave(lowerCamelCase , dim=0 ) self.scheduler.set_timesteps(lowerCamelCase , device=lowerCamelCase ) _lowerCAmelCase , _lowerCAmelCase = self.get_timesteps(lowerCamelCase , lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = timesteps[:1].repeat(batch_size * num_images_per_prompt ) _lowerCAmelCase , _lowerCAmelCase = downscale_height_and_width(lowerCamelCase , lowerCamelCase , self.movq_scale_factor ) _lowerCAmelCase = self.prepare_latents( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , image_embeds.dtype , lowerCamelCase , lowerCamelCase ) for i, t in enumerate(self.progress_bar(lowerCamelCase ) ): # expand the latents if we are doing classifier free guidance _lowerCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _lowerCAmelCase = {"""image_embeds""": image_embeds} _lowerCAmelCase = self.unet( sample=lowerCamelCase , timestep=lowerCamelCase , encoder_hidden_states=lowerCamelCase , added_cond_kwargs=lowerCamelCase , return_dict=lowerCamelCase , )[0] if do_classifier_free_guidance: _lowerCAmelCase , _lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 ) _lowerCAmelCase , _lowerCAmelCase = noise_pred.chunk(2 ) _lowerCAmelCase , _lowerCAmelCase = variance_pred.chunk(2 ) _lowerCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) _lowerCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , """variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): _lowerCAmelCase , _lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 _lowerCAmelCase = self.scheduler.step( lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase , )[0] # post-processing _lowerCAmelCase = self.movq.decode(lowerCamelCase , force_not_quantize=lowerCamelCase )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: _lowerCAmelCase = image * 0.5 + 0.5 _lowerCAmelCase = image.clamp(0 , 1 ) _lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": _lowerCAmelCase = self.numpy_to_pil(lowerCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCamelCase )
317
"""simple docstring""" import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters SCREAMING_SNAKE_CASE : Any = (7_2_0, 1_2_8_0) # Height, Width SCREAMING_SNAKE_CASE : List[str] = (0.4, 0.6) # if height or width lower than this scale, drop it. SCREAMING_SNAKE_CASE : List[Any] = 1 / 1_0_0 SCREAMING_SNAKE_CASE : Optional[Any] = '''''' SCREAMING_SNAKE_CASE : Dict = '''''' SCREAMING_SNAKE_CASE : List[Any] = '''''' SCREAMING_SNAKE_CASE : Dict = 2_5_0 def __UpperCAmelCase ( ) -> None: """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = get_dataset(snake_case_ , snake_case_ ) for index in range(snake_case_ ): _lowerCAmelCase = random.sample(range(len(snake_case_ ) ) , 4 ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = update_image_and_anno( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , filter_scale=snake_case_ , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' _lowerCAmelCase = random_chars(32 ) _lowerCAmelCase = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0] _lowerCAmelCase = F"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}""" cva.imwrite(F"""{file_root}.jpg""" , snake_case_ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(F"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" ) _lowerCAmelCase = [] for anno in new_annos: _lowerCAmelCase = anno[3] - anno[1] _lowerCAmelCase = anno[4] - anno[2] _lowerCAmelCase = anno[1] + width / 2 _lowerCAmelCase = anno[2] + height / 2 _lowerCAmelCase = F"""{anno[0]} {x_center} {y_center} {width} {height}""" annos_list.append(snake_case_ ) with open(F"""{file_root}.txt""" , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def __UpperCAmelCase ( snake_case_ : str , snake_case_ : str ) -> tuple[list, list]: """simple docstring""" _lowerCAmelCase = [] _lowerCAmelCase = [] for label_file in glob.glob(os.path.join(snake_case_ , """*.txt""" ) ): _lowerCAmelCase = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(snake_case_ ) as in_file: _lowerCAmelCase = in_file.readlines() _lowerCAmelCase = os.path.join(snake_case_ , F"""{label_name}.jpg""" ) _lowerCAmelCase = [] for obj_list in obj_lists: _lowerCAmelCase = obj_list.rstrip("""\n""" ).split(""" """ ) _lowerCAmelCase = float(obj[1] ) - float(obj[3] ) / 2 _lowerCAmelCase = float(obj[2] ) - float(obj[4] ) / 2 _lowerCAmelCase = float(obj[1] ) + float(obj[3] ) / 2 _lowerCAmelCase = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(snake_case_ ) labels.append(snake_case_ ) return img_paths, labels def __UpperCAmelCase ( snake_case_ : list , snake_case_ : list , snake_case_ : list[int] , snake_case_ : tuple[int, int] , snake_case_ : tuple[float, float] , snake_case_ : float = 0.0 , ) -> tuple[list, list, str]: """simple docstring""" _lowerCAmelCase = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) _lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) _lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) _lowerCAmelCase = int(scale_x * output_size[1] ) _lowerCAmelCase = int(scale_y * output_size[0] ) _lowerCAmelCase = [] _lowerCAmelCase = [] for i, index in enumerate(snake_case_ ): _lowerCAmelCase = all_img_list[index] path_list.append(snake_case_ ) _lowerCAmelCase = all_annos[index] _lowerCAmelCase = cva.imread(snake_case_ ) if i == 0: # top-left _lowerCAmelCase = cva.resize(snake_case_ , (divid_point_x, 
divid_point_y) ) _lowerCAmelCase = img for bbox in img_annos: _lowerCAmelCase = bbox[1] * scale_x _lowerCAmelCase = bbox[2] * scale_y _lowerCAmelCase = bbox[3] * scale_x _lowerCAmelCase = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right _lowerCAmelCase = cva.resize(snake_case_ , (output_size[1] - divid_point_x, divid_point_y) ) _lowerCAmelCase = img for bbox in img_annos: _lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x) _lowerCAmelCase = bbox[2] * scale_y _lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x) _lowerCAmelCase = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left _lowerCAmelCase = cva.resize(snake_case_ , (divid_point_x, output_size[0] - divid_point_y) ) _lowerCAmelCase = img for bbox in img_annos: _lowerCAmelCase = bbox[1] * scale_x _lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y) _lowerCAmelCase = bbox[3] * scale_x _lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right _lowerCAmelCase = cva.resize( snake_case_ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) _lowerCAmelCase = img for bbox in img_annos: _lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x) _lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y) _lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x) _lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: _lowerCAmelCase = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def __UpperCAmelCase ( snake_case_ : int ) -> str: """simple docstring""" assert number_char > 1, "The number of character should greater than 1" _lowerCAmelCase = ascii_lowercase + digits return "".join(random.choice(snake_case_ ) for _ in range(snake_case_ ) ) if __name__ == "__main__": main() print('''DONE ✅''')
317
1
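To make the coordinate arithmetic in the mosaic sample above concrete, here is the top-right-quadrant bounding-box remapping in isolation; the box values are made up, and everything is in normalized [0, 1] coordinates as in the sample.

```python
scale_x, scale_y = 0.5, 0.5     # where the four mosaic tiles meet
bbox = [0, 0.2, 0.4, 0.6, 0.8]  # class, xmin, ymin, xmax, ymax

# top-right tile: x is squeezed into [scale_x, 1], y into [0, scale_y]
xmin = scale_x + bbox[1] * (1 - scale_x)
ymin = bbox[2] * scale_y
xmax = scale_x + bbox[3] * (1 - scale_x)
ymax = bbox[4] * scale_y
print([bbox[0], xmin, ymin, xmax, ymax])  # [0, 0.6, 0.2, 0.8, 0.4]
```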
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class __lowerCamelCase ( __lowercase ): __UpperCamelCase = ( 'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.' 'It takes two arguments named `image` which should be the original image, and `label` which should be a text ' 'describing the elements what should be identified in the segmentation mask. The tool returns the mask.' ) __UpperCamelCase = 'CIDAS/clipseg-rd64-refined' __UpperCamelCase = 'image_segmenter' __UpperCamelCase = CLIPSegForImageSegmentation __UpperCamelCase = ['image', 'text'] __UpperCamelCase = ['image'] def __init__(self , *lowerCamelCase , **lowerCamelCase ): '''simple docstring''' requires_backends(self , ["""vision"""] ) super().__init__(*lowerCamelCase , **lowerCamelCase ) def A__ (self , lowerCamelCase , lowerCamelCase ): '''simple docstring''' return self.pre_processor(text=[label] , images=[image] , padding=lowerCamelCase , return_tensors="""pt""" ) def A__ (self , lowerCamelCase ): '''simple docstring''' with torch.no_grad(): _lowerCAmelCase = self.model(**lowerCamelCase ).logits return logits def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = outputs.cpu().detach().numpy() _lowerCAmelCase = 0 _lowerCAmelCase = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
317
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. SCREAMING_SNAKE_CASE : Dict = abspath(join(dirname(dirname(__file__)), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def __UpperCAmelCase ( snake_case_ : Optional[int] ) -> List[str]: """simple docstring""" from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(snake_case_ ) def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> int: """simple docstring""" from diffusers.utils.testing_utils import pytest_terminal_summary_main _lowerCAmelCase = terminalreporter.config.getoption("""--make-reports""" ) if make_reports: pytest_terminal_summary_main(snake_case_ , id=snake_case_ )
317
1
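The only non-trivial step in the segmentation-tool sample above is the decode stage; a standalone sketch of that thresholding, with a random array standing in for real CLIPSeg logits (the shape is a placeholder, not the model's actual output size).

```python
import numpy as np
from PIL import Image

logits = np.random.randn(352, 352).astype(np.float32)  # stand-in for model output

array = logits.copy()
array[array < 0] = 0   # negative logits -> background
array[array > 0] = 1   # positive logits -> mask
mask = Image.fromarray((array * 255).astype(np.uint8))  # black/white PIL mask
print(mask.size)  # (352, 352)
```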
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int ) -> str: """simple docstring""" return "\n".join( F"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) ) if __name__ == "__main__": print(multiplication_table(number=5, number_of_terms=1_0))
317
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool SCREAMING_SNAKE_CASE : Optional[Any] = { '''Acehnese Arabic''': '''ace_Arab''', '''Acehnese Latin''': '''ace_Latn''', '''Mesopotamian Arabic''': '''acm_Arab''', '''Ta\'izzi-Adeni Arabic''': '''acq_Arab''', '''Tunisian Arabic''': '''aeb_Arab''', '''Afrikaans''': '''afr_Latn''', '''South Levantine Arabic''': '''ajp_Arab''', '''Akan''': '''aka_Latn''', '''Amharic''': '''amh_Ethi''', '''North Levantine Arabic''': '''apc_Arab''', '''Modern Standard Arabic''': '''arb_Arab''', '''Modern Standard Arabic Romanized''': '''arb_Latn''', '''Najdi Arabic''': '''ars_Arab''', '''Moroccan Arabic''': '''ary_Arab''', '''Egyptian Arabic''': '''arz_Arab''', '''Assamese''': '''asm_Beng''', '''Asturian''': '''ast_Latn''', '''Awadhi''': '''awa_Deva''', '''Central Aymara''': '''ayr_Latn''', '''South Azerbaijani''': '''azb_Arab''', '''North Azerbaijani''': '''azj_Latn''', '''Bashkir''': '''bak_Cyrl''', '''Bambara''': '''bam_Latn''', '''Balinese''': '''ban_Latn''', '''Belarusian''': '''bel_Cyrl''', '''Bemba''': '''bem_Latn''', '''Bengali''': '''ben_Beng''', '''Bhojpuri''': '''bho_Deva''', '''Banjar Arabic''': '''bjn_Arab''', '''Banjar Latin''': '''bjn_Latn''', '''Standard Tibetan''': '''bod_Tibt''', '''Bosnian''': '''bos_Latn''', '''Buginese''': '''bug_Latn''', '''Bulgarian''': '''bul_Cyrl''', '''Catalan''': '''cat_Latn''', '''Cebuano''': '''ceb_Latn''', '''Czech''': '''ces_Latn''', '''Chokwe''': '''cjk_Latn''', '''Central Kurdish''': '''ckb_Arab''', '''Crimean Tatar''': '''crh_Latn''', '''Welsh''': '''cym_Latn''', '''Danish''': '''dan_Latn''', '''German''': '''deu_Latn''', '''Southwestern Dinka''': '''dik_Latn''', '''Dyula''': '''dyu_Latn''', '''Dzongkha''': '''dzo_Tibt''', '''Greek''': '''ell_Grek''', '''English''': '''eng_Latn''', '''Esperanto''': '''epo_Latn''', '''Estonian''': '''est_Latn''', '''Basque''': '''eus_Latn''', '''Ewe''': '''ewe_Latn''', '''Faroese''': '''fao_Latn''', '''Fijian''': '''fij_Latn''', '''Finnish''': '''fin_Latn''', '''Fon''': '''fon_Latn''', '''French''': '''fra_Latn''', '''Friulian''': '''fur_Latn''', '''Nigerian Fulfulde''': '''fuv_Latn''', '''Scottish Gaelic''': '''gla_Latn''', '''Irish''': '''gle_Latn''', '''Galician''': '''glg_Latn''', '''Guarani''': '''grn_Latn''', '''Gujarati''': '''guj_Gujr''', '''Haitian Creole''': '''hat_Latn''', '''Hausa''': '''hau_Latn''', '''Hebrew''': '''heb_Hebr''', '''Hindi''': '''hin_Deva''', '''Chhattisgarhi''': '''hne_Deva''', '''Croatian''': '''hrv_Latn''', '''Hungarian''': '''hun_Latn''', '''Armenian''': '''hye_Armn''', '''Igbo''': '''ibo_Latn''', '''Ilocano''': '''ilo_Latn''', '''Indonesian''': '''ind_Latn''', '''Icelandic''': '''isl_Latn''', '''Italian''': '''ita_Latn''', '''Javanese''': '''jav_Latn''', '''Japanese''': '''jpn_Jpan''', '''Kabyle''': '''kab_Latn''', '''Jingpho''': '''kac_Latn''', '''Kamba''': '''kam_Latn''', '''Kannada''': 
'''kan_Knda''', '''Kashmiri Arabic''': '''kas_Arab''', '''Kashmiri Devanagari''': '''kas_Deva''', '''Georgian''': '''kat_Geor''', '''Central Kanuri Arabic''': '''knc_Arab''', '''Central Kanuri Latin''': '''knc_Latn''', '''Kazakh''': '''kaz_Cyrl''', '''Kabiyè''': '''kbp_Latn''', '''Kabuverdianu''': '''kea_Latn''', '''Khmer''': '''khm_Khmr''', '''Kikuyu''': '''kik_Latn''', '''Kinyarwanda''': '''kin_Latn''', '''Kyrgyz''': '''kir_Cyrl''', '''Kimbundu''': '''kmb_Latn''', '''Northern Kurdish''': '''kmr_Latn''', '''Kikongo''': '''kon_Latn''', '''Korean''': '''kor_Hang''', '''Lao''': '''lao_Laoo''', '''Ligurian''': '''lij_Latn''', '''Limburgish''': '''lim_Latn''', '''Lingala''': '''lin_Latn''', '''Lithuanian''': '''lit_Latn''', '''Lombard''': '''lmo_Latn''', '''Latgalian''': '''ltg_Latn''', '''Luxembourgish''': '''ltz_Latn''', '''Luba-Kasai''': '''lua_Latn''', '''Ganda''': '''lug_Latn''', '''Luo''': '''luo_Latn''', '''Mizo''': '''lus_Latn''', '''Standard Latvian''': '''lvs_Latn''', '''Magahi''': '''mag_Deva''', '''Maithili''': '''mai_Deva''', '''Malayalam''': '''mal_Mlym''', '''Marathi''': '''mar_Deva''', '''Minangkabau Arabic ''': '''min_Arab''', '''Minangkabau Latin''': '''min_Latn''', '''Macedonian''': '''mkd_Cyrl''', '''Plateau Malagasy''': '''plt_Latn''', '''Maltese''': '''mlt_Latn''', '''Meitei Bengali''': '''mni_Beng''', '''Halh Mongolian''': '''khk_Cyrl''', '''Mossi''': '''mos_Latn''', '''Maori''': '''mri_Latn''', '''Burmese''': '''mya_Mymr''', '''Dutch''': '''nld_Latn''', '''Norwegian Nynorsk''': '''nno_Latn''', '''Norwegian Bokmål''': '''nob_Latn''', '''Nepali''': '''npi_Deva''', '''Northern Sotho''': '''nso_Latn''', '''Nuer''': '''nus_Latn''', '''Nyanja''': '''nya_Latn''', '''Occitan''': '''oci_Latn''', '''West Central Oromo''': '''gaz_Latn''', '''Odia''': '''ory_Orya''', '''Pangasinan''': '''pag_Latn''', '''Eastern Panjabi''': '''pan_Guru''', '''Papiamento''': '''pap_Latn''', '''Western Persian''': '''pes_Arab''', '''Polish''': '''pol_Latn''', '''Portuguese''': '''por_Latn''', '''Dari''': '''prs_Arab''', '''Southern Pashto''': '''pbt_Arab''', '''Ayacucho Quechua''': '''quy_Latn''', '''Romanian''': '''ron_Latn''', '''Rundi''': '''run_Latn''', '''Russian''': '''rus_Cyrl''', '''Sango''': '''sag_Latn''', '''Sanskrit''': '''san_Deva''', '''Santali''': '''sat_Olck''', '''Sicilian''': '''scn_Latn''', '''Shan''': '''shn_Mymr''', '''Sinhala''': '''sin_Sinh''', '''Slovak''': '''slk_Latn''', '''Slovenian''': '''slv_Latn''', '''Samoan''': '''smo_Latn''', '''Shona''': '''sna_Latn''', '''Sindhi''': '''snd_Arab''', '''Somali''': '''som_Latn''', '''Southern Sotho''': '''sot_Latn''', '''Spanish''': '''spa_Latn''', '''Tosk Albanian''': '''als_Latn''', '''Sardinian''': '''srd_Latn''', '''Serbian''': '''srp_Cyrl''', '''Swati''': '''ssw_Latn''', '''Sundanese''': '''sun_Latn''', '''Swedish''': '''swe_Latn''', '''Swahili''': '''swh_Latn''', '''Silesian''': '''szl_Latn''', '''Tamil''': '''tam_Taml''', '''Tatar''': '''tat_Cyrl''', '''Telugu''': '''tel_Telu''', '''Tajik''': '''tgk_Cyrl''', '''Tagalog''': '''tgl_Latn''', '''Thai''': '''tha_Thai''', '''Tigrinya''': '''tir_Ethi''', '''Tamasheq Latin''': '''taq_Latn''', '''Tamasheq Tifinagh''': '''taq_Tfng''', '''Tok Pisin''': '''tpi_Latn''', '''Tswana''': '''tsn_Latn''', '''Tsonga''': '''tso_Latn''', '''Turkmen''': '''tuk_Latn''', '''Tumbuka''': '''tum_Latn''', '''Turkish''': '''tur_Latn''', '''Twi''': '''twi_Latn''', '''Central Atlas Tamazight''': '''tzm_Tfng''', '''Uyghur''': '''uig_Arab''', '''Ukrainian''': '''ukr_Cyrl''', '''Umbundu''': '''umb_Latn''', 
'''Urdu''': '''urd_Arab''', '''Northern Uzbek''': '''uzn_Latn''', '''Venetian''': '''vec_Latn''', '''Vietnamese''': '''vie_Latn''', '''Waray''': '''war_Latn''', '''Wolof''': '''wol_Latn''', '''Xhosa''': '''xho_Latn''', '''Eastern Yiddish''': '''ydd_Hebr''', '''Yoruba''': '''yor_Latn''', '''Yue Chinese''': '''yue_Hant''', '''Chinese Simplified''': '''zho_Hans''', '''Chinese Traditional''': '''zho_Hant''', '''Standard Malay''': '''zsm_Latn''', '''Zulu''': '''zul_Latn''', } class __lowerCamelCase ( __lowercase ): __UpperCamelCase = 'facebook/nllb-200-distilled-600M' __UpperCamelCase = ( 'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should ' 'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ' 'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in ' 'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.' ) __UpperCamelCase = 'translator' __UpperCamelCase = AutoTokenizer __UpperCamelCase = AutoModelForSeqaSeqLM __UpperCamelCase = LANGUAGE_CODES __UpperCamelCase = ['text', 'text', 'text'] __UpperCamelCase = ['text'] def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if src_lang not in self.lang_to_code: raise ValueError(f"""{src_lang} is not a supported language.""" ) if tgt_lang not in self.lang_to_code: raise ValueError(f"""{tgt_lang} is not a supported language.""" ) _lowerCAmelCase = self.lang_to_code[src_lang] _lowerCAmelCase = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( lowerCamelCase , return_tensors="""pt""" , src_lang=lowerCamelCase , tgt_lang=lowerCamelCase ) def A__ (self , lowerCamelCase ): '''simple docstring''' return self.model.generate(**lowerCamelCase ) def A__ (self , lowerCamelCase ): '''simple docstring''' return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowerCamelCase )
317
1
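What the translation tool above does end to end, sketched with plain transformers calls; the checkpoint name and the `_build_translation_inputs` helper both come from the sample itself (note the latter is a private tokenizer method), while the example sentence is arbitrary.

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

ckpt = "facebook/nllb-200-distilled-600M"
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForSeq2SeqLM.from_pretrained(ckpt)

# "English" / "French" resolve to these codes via the LANGUAGE_CODES mapping
inputs = tok._build_translation_inputs(
    "The weather is nice today.", return_tensors="pt",
    src_lang="eng_Latn", tgt_lang="fra_Latn",
)
out = model.generate(**inputs)
print(tok.decode(out[0], skip_special_tokens=True))
```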
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : list ) -> bool: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ): raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" ) if len(snake_case_ ) == 0: raise ValueError("""Input list must be a non empty list""" ) if len(snake_case_ ) == 1: return True _lowerCAmelCase = series[1] - series[0] for index in range(len(snake_case_ ) - 1 ): if series[index + 1] - series[index] != common_diff: return False return True def __UpperCAmelCase ( snake_case_ : list ) -> float: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ): raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" ) if len(snake_case_ ) == 0: raise ValueError("""Input list must be a non empty list""" ) _lowerCAmelCase = 0 for val in series: answer += val return answer / len(snake_case_ ) if __name__ == "__main__": import doctest doctest.testmod()
317
"""simple docstring""" from math import isqrt def __UpperCAmelCase ( snake_case_ : int ) -> list[int]: """simple docstring""" _lowerCAmelCase = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , snake_case_ , snake_case_ ): _lowerCAmelCase = False return [i for i in range(2 , snake_case_ ) if is_prime[i]] def __UpperCAmelCase ( snake_case_ : int = 10**8 ) -> int: """simple docstring""" _lowerCAmelCase = calculate_prime_numbers(max_number // 2 ) _lowerCAmelCase = 0 _lowerCAmelCase = 0 _lowerCAmelCase = len(snake_case_ ) - 1 while left <= right: while prime_numbers[left] * prime_numbers[right] >= max_number: right -= 1 semiprimes_count += right - left + 1 left += 1 return semiprimes_count if __name__ == "__main__": print(F'{solution() = }')
317
1
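Usage sketch for the two number-theory samples above. The dataset stores every definition under the same obfuscated name, so the snippet below uses illustrative names (`is_arithmetic_series`, `series_mean`, `count_semiprimes`) for the three functions in reading order.

```python
assert is_arithmetic_series([2, 4, 6]) is True   # common difference 2
assert is_arithmetic_series([2, 4, 7]) is False
assert series_mean([2, 4, 6]) == 4.0

# products of two primes below 30: 4, 6, 9, 10, 14, 15, 21, 22, 25, 26
assert count_semiprimes(30) == 10
```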
"""simple docstring""" # This is the module that test_patching.py uses to test patch_submodule() import os # noqa: this is just for tests import os as renamed_os # noqa: this is just for tests from os import path # noqa: this is just for tests from os import path as renamed_path # noqa: this is just for tests from os.path import join # noqa: this is just for tests from os.path import join as renamed_join # noqa: this is just for tests SCREAMING_SNAKE_CASE : Optional[Any] = open # noqa: we just need to have a builtin inside this module to test it properly
317
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class __lowerCamelCase ( __lowercase ): __UpperCamelCase = ( 'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.' 'It takes two arguments named `image` which should be the original image, and `label` which should be a text ' 'describing the elements what should be identified in the segmentation mask. The tool returns the mask.' ) __UpperCamelCase = 'CIDAS/clipseg-rd64-refined' __UpperCamelCase = 'image_segmenter' __UpperCamelCase = CLIPSegForImageSegmentation __UpperCamelCase = ['image', 'text'] __UpperCamelCase = ['image'] def __init__(self , *lowerCamelCase , **lowerCamelCase ): '''simple docstring''' requires_backends(self , ["""vision"""] ) super().__init__(*lowerCamelCase , **lowerCamelCase ) def A__ (self , lowerCamelCase , lowerCamelCase ): '''simple docstring''' return self.pre_processor(text=[label] , images=[image] , padding=lowerCamelCase , return_tensors="""pt""" ) def A__ (self , lowerCamelCase ): '''simple docstring''' with torch.no_grad(): _lowerCAmelCase = self.model(**lowerCamelCase ).logits return logits def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = outputs.cpu().detach().numpy() _lowerCAmelCase = 0 _lowerCAmelCase = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
317
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) SCREAMING_SNAKE_CASE : Dict = { '''configuration_efficientformer''': [ '''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''EfficientFormerConfig''', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Union[str, Any] = ['''EfficientFormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : List[str] = [ '''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''EfficientFormerForImageClassification''', '''EfficientFormerForImageClassificationWithTeacher''', '''EfficientFormerModel''', '''EfficientFormerPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Union[str, Any] = [ '''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFEfficientFormerForImageClassification''', '''TFEfficientFormerForImageClassificationWithTeacher''', '''TFEfficientFormerModel''', '''TFEfficientFormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
317
"""simple docstring""" from __future__ import annotations import queue class __lowerCamelCase : def __init__(self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = data _lowerCAmelCase = None _lowerCAmelCase = None def __UpperCAmelCase ( ) -> TreeNode: """simple docstring""" print("""\n********Press N to stop entering at any point of time********\n""" ) _lowerCAmelCase = input("""Enter the value of the root node: """ ).strip().lower() _lowerCAmelCase = queue.Queue() _lowerCAmelCase = TreeNode(int(snake_case_ ) ) q.put(snake_case_ ) while not q.empty(): _lowerCAmelCase = q.get() _lowerCAmelCase = F"""Enter the left node of {node_found.data}: """ _lowerCAmelCase = input(snake_case_ ).strip().lower() or """n""" if check == "n": return tree_node _lowerCAmelCase = TreeNode(int(snake_case_ ) ) _lowerCAmelCase = left_node q.put(snake_case_ ) _lowerCAmelCase = F"""Enter the right node of {node_found.data}: """ _lowerCAmelCase = input(snake_case_ ).strip().lower() or """n""" if check == "n": return tree_node _lowerCAmelCase = TreeNode(int(snake_case_ ) ) _lowerCAmelCase = right_node q.put(snake_case_ ) raise def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return print(node.data , end=""",""" ) pre_order(node.left ) pre_order(node.right ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return in_order(node.left ) print(node.data , end=""",""" ) in_order(node.right ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=""",""" ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase = queue.Queue() q.put(snake_case_ ) while not q.empty(): _lowerCAmelCase = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase = queue.Queue() q.put(snake_case_ ) while not q.empty(): _lowerCAmelCase = [] while not q.empty(): _lowerCAmelCase = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(snake_case_ ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase = [] _lowerCAmelCase = node while n or stack: while n: # start from root node, find its left child print(n.data , end=""",""" ) stack.append(snake_case_ ) _lowerCAmelCase = n.left # end of while means current node doesn't have left child _lowerCAmelCase = stack.pop() # start to traverse its right child _lowerCAmelCase = n.right def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase = [] _lowerCAmelCase = node while n or stack: while n: stack.append(snake_case_ ) _lowerCAmelCase = n.left _lowerCAmelCase = stack.pop() print(n.data , end=""",""" ) _lowerCAmelCase = n.right def __UpperCAmelCase ( 
snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase , _lowerCAmelCase = [], [] _lowerCAmelCase = node stacka.append(snake_case_ ) while stacka: # to find the reversed order of post order, store it in stack2 _lowerCAmelCase = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(snake_case_ ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=""",""" ) def __UpperCAmelCase ( snake_case_ : str = "" , snake_case_ : int=50 , snake_case_ : Dict="*" ) -> str: """simple docstring""" if not s: return "\n" + width * char _lowerCAmelCase , _lowerCAmelCase = divmod(width - len(snake_case_ ) - 2 , 2 ) return F"""{left * char} {s} {(left + extra) * char}""" if __name__ == "__main__": import doctest doctest.testmod() print(prompt('''Binary Tree Traversals''')) SCREAMING_SNAKE_CASE : TreeNode = build_tree() print(prompt('''Pre Order Traversal''')) pre_order(node) print(prompt() + '''\n''') print(prompt('''In Order Traversal''')) in_order(node) print(prompt() + '''\n''') print(prompt('''Post Order Traversal''')) post_order(node) print(prompt() + '''\n''') print(prompt('''Level Order Traversal''')) level_order(node) print(prompt() + '''\n''') print(prompt('''Actual Level Order Traversal''')) level_order_actual(node) print('''*''' * 5_0 + '''\n''') print(prompt('''Pre Order Traversal - Iteration Version''')) pre_order_iter(node) print(prompt() + '''\n''') print(prompt('''In Order Traversal - Iteration Version''')) in_order_iter(node) print(prompt() + '''\n''') print(prompt('''Post Order Traversal - Iteration Version''')) post_order_iter(node) print(prompt())
317
1
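The `__init__` sample in the row above follows the transformers lazy-import layout: an `_import_structure` dict for runtime, real imports only under `TYPE_CHECKING`, and a `_LazyModule` that resolves attributes on first access. Below is a minimal stdlib-only sketch of the same idea using PEP 562's module-level `__getattr__`; the package and class names are illustrative, not the transformers internals.

# lazy_pkg/__init__.py -- illustrative sketch of deferred submodule imports
import importlib
from typing import TYPE_CHECKING

_import_structure = {
    "config": ["ModelConfig"],
    "modeling": ["Model"],
}

if TYPE_CHECKING:  # static type checkers see the real imports
    from .config import ModelConfig
    from .modeling import Model
else:
    def __getattr__(name):  # PEP 562: called on first attribute access
        for module_name, exported in _import_structure.items():
            if name in exported:
                module = importlib.import_module(f".{module_name}", __name__)
                return getattr(module, name)
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")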
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: SCREAMING_SNAKE_CASE : str = None SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : int = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} SCREAMING_SNAKE_CASE : Tuple = { '''vocab_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''', }, '''tokenizer_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''', }, } SCREAMING_SNAKE_CASE : Any = { '''camembert-base''': 5_1_2, } SCREAMING_SNAKE_CASE : int = '''▁''' class __lowerCamelCase ( __lowercase ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ['input_ids', 'attention_mask'] __UpperCamelCase = CamembertTokenizer def __init__(self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase=["<s>NOTUSED", "</s>NOTUSED"] , **lowerCamelCase , ): '''simple docstring''' _lowerCAmelCase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token super().__init__( lowerCamelCase , tokenizer_file=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , unk_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , additional_special_tokens=lowerCamelCase , **lowerCamelCase , ) _lowerCAmelCase = vocab_file _lowerCAmelCase = False if not self.vocab_file else True def A__ (self , lowerCamelCase , lowerCamelCase = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _lowerCAmelCase = [self.cls_token_id] _lowerCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def A__ (self , lowerCamelCase , lowerCamelCase = None ): '''simple docstring''' _lowerCAmelCase = [self.sep_token_id] _lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def A__ (self , lowerCamelCase , lowerCamelCase = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(lowerCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return _lowerCAmelCase = os.path.join( lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ): copyfile(self.vocab_file , lowerCamelCase ) return (out_vocab_file,)
317
"""simple docstring""" from __future__ import annotations class __lowerCamelCase : def __init__(self , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase = text, pattern _lowerCAmelCase , _lowerCAmelCase = len(lowerCamelCase ), len(lowerCamelCase ) def A__ (self , lowerCamelCase ): '''simple docstring''' for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def A__ (self , lowerCamelCase ): '''simple docstring''' for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def A__ (self ): '''simple docstring''' _lowerCAmelCase = [] for i in range(self.textLen - self.patLen + 1 ): _lowerCAmelCase = self.mismatch_in_text(lowerCamelCase ) if mismatch_index == -1: positions.append(lowerCamelCase ) else: _lowerCAmelCase = self.match_in_pattern(self.text[mismatch_index] ) _lowerCAmelCase = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions SCREAMING_SNAKE_CASE : Any = '''ABAABA''' SCREAMING_SNAKE_CASE : Optional[int] = '''AB''' SCREAMING_SNAKE_CASE : str = BoyerMooreSearch(text, pattern) SCREAMING_SNAKE_CASE : Tuple = bms.bad_character_heuristic() if len(positions) == 0: print('''No match found''') else: print('''Pattern found in following positions: ''') print(positions)
317
1
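A quick way to exercise the BoyerMooreSearch sample beyond its built-in "ABAABA" demo is to compare it against a brute-force scan over a handful of cases. A self-contained sketch of that harness follows; the lambda at the end shows how the class from the sample would plug in.

# Sanity harness: compare any search(text, pattern) -> list[int] against brute force.
def brute_force_positions(text: str, pattern: str) -> list[int]:
    width = len(pattern)
    return [i for i in range(len(text) - width + 1) if text[i : i + width] == pattern]

def check(search_fn, cases):
    for text, pattern in cases:
        assert search_fn(text, pattern) == brute_force_positions(text, pattern), (text, pattern)

cases = [("ABAABA", "AB"), ("AAAA", "AA"), ("ABCDE", "F")]
# e.g. check(lambda t, p: BoyerMooreSearch(t, p).bad_character_heuristic(), cases)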
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : int = 1000 ) -> int: """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = 1, 1 _lowerCAmelCase = [] for i in range(1 , n + 1 ): _lowerCAmelCase = prev_numerator + 2 * prev_denominator _lowerCAmelCase = prev_numerator + prev_denominator if len(str(snake_case_ ) ) > len(str(snake_case_ ) ): result.append(snake_case_ ) _lowerCAmelCase = numerator _lowerCAmelCase = denominator return len(snake_case_ ) if __name__ == "__main__": print(F'{solution() = }')
317
"""simple docstring""" import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device SCREAMING_SNAKE_CASE : List[str] = False class __lowerCamelCase ( unittest.TestCase ): pass @slow @require_torch_gpu class __lowerCamelCase ( unittest.TestCase ): def A__ (self ): '''simple docstring''' _lowerCAmelCase = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) _lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe( image=lowerCamelCase , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images _lowerCAmelCase = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _lowerCAmelCase = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
317
1
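The square-root convergent snippet above tracks the numerator and denominator of each sqrt(2) expansion by hand. The same count can be cross-checked with fractions.Fraction, since each expansion satisfies the recurrence next = 1 + 1/(1 + previous), starting from 3/2. A sketch under that recurrence:

from fractions import Fraction

def count_heavy_numerators(terms: int = 1000) -> int:
    expansion = Fraction(3, 2)  # first expansion: 1 + 1/2
    count = 0
    for _ in range(terms):
        if len(str(expansion.numerator)) > len(str(expansion.denominator)):
            count += 1
        expansion = 1 + 1 / (1 + expansion)  # Fraction arithmetic stays exact
    return count

print(count_heavy_numerators())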
"""simple docstring""" import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput SCREAMING_SNAKE_CASE : Tuple = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class __lowerCamelCase ( __lowercase ): def __init__(self , *lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , **lowerCamelCase ): '''simple docstring''' super().__init__(*lowerCamelCase , **lowerCamelCase ) _lowerCAmelCase = eval_examples _lowerCAmelCase = post_process_function _lowerCAmelCase = quant_trainer_args _lowerCAmelCase = 128 # default number of calibration samples def A__ (self , lowerCamelCase=None ): '''simple docstring''' if calib_dataset is None and self.calib_dataset is None: raise ValueError("""Trainer: calibration requires an calib_dataset.""" ) _lowerCAmelCase = calib_dataset if calib_dataset is not None else self.calib_dataset _lowerCAmelCase = self._remove_unused_columns(lowerCamelCase , description="""Calibration""" ) return DataLoader( lowerCamelCase , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=lowerCamelCase , ) def A__ (self , lowerCamelCase=None ): '''simple docstring''' _lowerCAmelCase = self.train_dataset if calib_dataset is None else calib_dataset _lowerCAmelCase = self.get_calib_dataloader(lowerCamelCase ) _lowerCAmelCase = self.model quant_trainer.configure_model(lowerCamelCase , self.quant_trainer_args , calib=lowerCamelCase ) model.eval() quant_trainer.enable_calibration(lowerCamelCase ) logger.info("""***** Running calibration *****""" ) logger.info(f""" Num examples = {self.calib_num}""" ) logger.info(f""" Batch size = {calib_dataloader.batch_size}""" ) for step, inputs in enumerate(lowerCamelCase ): # Prediction step _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.prediction_step(lowerCamelCase , lowerCamelCase , prediction_loss_only=lowerCamelCase ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(lowerCamelCase , self.quant_trainer_args ) _lowerCAmelCase = model def A__ (self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase = "eval" ): '''simple docstring''' _lowerCAmelCase = self.eval_dataset if eval_dataset is None else eval_dataset _lowerCAmelCase = self.get_eval_dataloader(lowerCamelCase ) _lowerCAmelCase = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. 
_lowerCAmelCase = self.compute_metrics _lowerCAmelCase = None _lowerCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _lowerCAmelCase = eval_loop( lowerCamelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCamelCase , ) finally: _lowerCAmelCase = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: _lowerCAmelCase = self.post_process_function(lowerCamelCase , lowerCamelCase , output.predictions ) _lowerCAmelCase = self.compute_metrics(lowerCamelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): _lowerCAmelCase = metrics.pop(lowerCamelCase ) self.log(lowerCamelCase ) else: _lowerCAmelCase = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _lowerCAmelCase = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowerCamelCase ) return metrics def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase = "test" ): '''simple docstring''' _lowerCAmelCase = self.get_test_dataloader(lowerCamelCase ) # Temporarily disable metric computation, we will do it in the loop here. _lowerCAmelCase = self.compute_metrics _lowerCAmelCase = None _lowerCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _lowerCAmelCase = eval_loop( lowerCamelCase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCamelCase , ) finally: _lowerCAmelCase = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output _lowerCAmelCase = self.post_process_function(lowerCamelCase , lowerCamelCase , output.predictions , """predict""" ) _lowerCAmelCase = self.compute_metrics(lowerCamelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): _lowerCAmelCase = metrics.pop(lowerCamelCase ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowerCamelCase ) def A__ (self , lowerCamelCase="./" ): '''simple docstring''' _lowerCAmelCase = self.eval_dataset _lowerCAmelCase = self.get_eval_dataloader(lowerCamelCase ) _lowerCAmelCase = next(iter(lowerCamelCase ) ) # saving device - to make it consistent _lowerCAmelCase = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) # convert to tuple _lowerCAmelCase = tuple(v.to(lowerCamelCase ) for k, v in batch.items() ) logger.info("""Converting model to be onnx compatible""" ) from pytorch_quantization.nn import TensorQuantizer _lowerCAmelCase = True _lowerCAmelCase = self.model.to(lowerCamelCase ) model.eval() model.float() _lowerCAmelCase = model.module if hasattr(lowerCamelCase , """module""" ) else model quant_trainer.configure_model(lowerCamelCase , self.quant_trainer_args ) _lowerCAmelCase = os.path.join(lowerCamelCase , """model.onnx""" ) logger.info(f"""exporting model to {output_model_file}""" ) _lowerCAmelCase = {0: """batch_size""", 1: """seq_len"""} torch.onnx.export( lowerCamelCase , lowerCamelCase , lowerCamelCase , export_params=lowerCamelCase , opset_version=13 , do_constant_folding=lowerCamelCase , input_names=["""input_ids""", """attention_mask""", 
"""token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={ """input_ids""": axes, """attention_mask""": axes, """token_type_ids""": axes, """output_start_logits""": axes, """output_end_logits""": axes, } , verbose=lowerCamelCase , ) logger.info("""onnx export finished""" )
317
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __lowerCamelCase ( __lowercase , unittest.TestCase ): __UpperCamelCase = DiTPipeline __UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS __UpperCamelCase = PipelineTesterMixin.required_optional_params - { 'latents', 'num_images_per_prompt', 'callback', 'callback_steps', } __UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS __UpperCamelCase = False def A__ (self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCamelCase , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1_000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=lowerCamelCase , ) _lowerCAmelCase = AutoencoderKL() _lowerCAmelCase = DDIMScheduler() _lowerCAmelCase = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler} return components def A__ (self , lowerCamelCase , lowerCamelCase=0 ): '''simple docstring''' if str(lowerCamelCase ).startswith("""mps""" ): _lowerCAmelCase = torch.manual_seed(lowerCamelCase ) else: _lowerCAmelCase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) _lowerCAmelCase = { """class_labels""": [1], """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def A__ (self ): '''simple docstring''' _lowerCAmelCase = """cpu""" _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = self.pipeline_class(**lowerCamelCase ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) _lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase ) _lowerCAmelCase = pipe(**lowerCamelCase ).images _lowerCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _lowerCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) _lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCamelCase , 1e-3 ) def A__ (self ): '''simple docstring''' self._test_inference_batch_single_identical(relax_max_difference=lowerCamelCase , expected_max_diff=1e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def A__ (self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @require_torch_gpu @slow class __lowerCamelCase ( unittest.TestCase ): def A__ (self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ (self ): '''simple docstring''' _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" ) pipe.to("""cuda""" ) _lowerCAmelCase = ["""vase""", """umbrella""", """white shark""", """white wolf"""] _lowerCAmelCase = pipe.get_label_ids(lowerCamelCase ) 
_lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=40 , output_type="""np""" ).images for word, image in zip(lowerCamelCase , lowerCamelCase ): _lowerCAmelCase = load_numpy( f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" ) assert np.abs((expected_image - image).max() ) < 1e-2 def A__ (self ): '''simple docstring''' _lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" ) _lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("""cuda""" ) _lowerCAmelCase = ["""vase""", """umbrella"""] _lowerCAmelCase = pipe.get_label_ids(lowerCamelCase ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=25 , output_type="""np""" ).images for word, image in zip(lowerCamelCase , lowerCamelCase ): _lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" f"""/dit/{word}_512.npy""" ) assert np.abs((expected_image - image).max() ) < 1e-1
317
1
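The QuestionAnsweringTrainer sample in this row ends by exporting the calibrated model with torch.onnx.export and per-axis dynamic shapes. The call pattern is easier to see on a toy module, stripped of the quantization machinery; the file name, module, and axis labels below are placeholders, not part of the original.

import torch
import torch.nn as nn

class TinyClassifier(nn.Module):
    def __init__(self):
        super().__init__()
        self.embed = nn.Embedding(100, 16)
        self.head = nn.Linear(16, 2)

    def forward(self, input_ids):
        # (batch, seq) -> (batch, 16) -> (batch, 2)
        return self.head(self.embed(input_ids).mean(dim=1))

model = TinyClassifier().eval()
dummy = torch.randint(0, 100, (1, 8))   # (batch_size, seq_len)
axes = {0: "batch_size", 1: "seq_len"}  # mark both input dims as dynamic
torch.onnx.export(
    model,
    (dummy,),
    "tiny.onnx",
    export_params=True,
    opset_version=13,
    do_constant_folding=True,
    input_names=["input_ids"],
    output_names=["logits"],
    dynamic_axes={"input_ids": axes, "logits": {0: "batch_size"}},
)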
"""simple docstring""" from manim import * class __lowerCamelCase ( __lowercase ): def A__ (self ): '''simple docstring''' _lowerCAmelCase = Rectangle(height=0.5 , width=0.5 ) _lowerCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) _lowerCAmelCase = [mem.copy() for i in range(6 )] _lowerCAmelCase = [mem.copy() for i in range(6 )] _lowerCAmelCase = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 ) _lowerCAmelCase = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 ) _lowerCAmelCase = VGroup(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0 ) _lowerCAmelCase = Text("""CPU""" , font_size=24 ) _lowerCAmelCase = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowerCamelCase ) _lowerCAmelCase = [mem.copy() for i in range(4 )] _lowerCAmelCase = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 ) _lowerCAmelCase = Text("""GPU""" , font_size=24 ) _lowerCAmelCase = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase ) gpu.move_to([-1, -1, 0] ) self.add(lowerCamelCase ) _lowerCAmelCase = [mem.copy() for i in range(6 )] _lowerCAmelCase = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 ) _lowerCAmelCase = Text("""Model""" , font_size=24 ) _lowerCAmelCase = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase ) model.move_to([3, -1.0, 0] ) self.add(lowerCamelCase ) _lowerCAmelCase = [] for i, rect in enumerate(lowerCamelCase ): rect.set_stroke(lowerCamelCase ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) _lowerCAmelCase = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCamelCase ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=lowerCamelCase , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=lowerCamelCase , buff=0.0 ) self.add(lowerCamelCase ) cpu_targs.append(lowerCamelCase ) _lowerCAmelCase = [mem.copy() for i in range(6 )] _lowerCAmelCase = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 ) _lowerCAmelCase = Text("""Loaded Checkpoint""" , font_size=24 ) _lowerCAmelCase = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , aligned_edge=lowerCamelCase , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) _lowerCAmelCase = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _lowerCAmelCase = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) _lowerCAmelCase = MarkupText( f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCamelCase ) , Write(lowerCamelCase ) ) self.play(Write(lowerCamelCase , run_time=1 ) , Create(lowerCamelCase , run_time=1 ) ) _lowerCAmelCase = [] _lowerCAmelCase = [] for i, rect in enumerate(lowerCamelCase ): _lowerCAmelCase = 
fill.copy().set_fill(lowerCamelCase , opacity=0.7 ) target.move_to(lowerCamelCase ) first_animations.append(GrowFromCenter(lowerCamelCase , run_time=1 ) ) _lowerCAmelCase = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(lowerCamelCase , run_time=1.5 ) ) self.play(*lowerCamelCase ) self.play(*lowerCamelCase ) self.wait()
317
"""simple docstring""" from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> Dict: """simple docstring""" return getitem, k def __UpperCAmelCase ( snake_case_ : Dict , snake_case_ : Union[str, Any] ) -> List[Any]: """simple docstring""" return setitem, k, v def __UpperCAmelCase ( snake_case_ : str ) -> Optional[int]: """simple docstring""" return delitem, k def __UpperCAmelCase ( snake_case_ : Optional[Any] , snake_case_ : Tuple , *snake_case_ : Tuple ) -> str: """simple docstring""" try: return fun(snake_case_ , *snake_case_ ), None except Exception as e: return None, e SCREAMING_SNAKE_CASE : int = ( _set('''key_a''', '''val_a'''), _set('''key_b''', '''val_b'''), ) SCREAMING_SNAKE_CASE : List[Any] = [ _set('''key_a''', '''val_a'''), _set('''key_a''', '''val_b'''), ] SCREAMING_SNAKE_CASE : Any = [ _set('''key_a''', '''val_a'''), _set('''key_b''', '''val_b'''), _del('''key_a'''), _del('''key_b'''), _set('''key_a''', '''val_a'''), _del('''key_a'''), ] SCREAMING_SNAKE_CASE : Union[str, Any] = [ _get('''key_a'''), _del('''key_a'''), _set('''key_a''', '''val_a'''), _del('''key_a'''), _del('''key_a'''), _get('''key_a'''), ] SCREAMING_SNAKE_CASE : Optional[Any] = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] SCREAMING_SNAKE_CASE : Optional[int] = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set('''key_a''', '''val_b'''), ] @pytest.mark.parametrize( """operations""" , ( pytest.param(_add_items , id="""add items""" ), pytest.param(_overwrite_items , id="""overwrite items""" ), pytest.param(_delete_items , id="""delete items""" ), pytest.param(_access_absent_items , id="""access absent items""" ), pytest.param(_add_with_resize_up , id="""add with resize up""" ), pytest.param(_add_with_resize_down , id="""add with resize down""" ), ) , ) def __UpperCAmelCase ( snake_case_ : List[Any] ) -> Tuple: """simple docstring""" _lowerCAmelCase = HashMap(initial_block_size=4 ) _lowerCAmelCase = {} for _, (fun, *args) in enumerate(snake_case_ ): _lowerCAmelCase , _lowerCAmelCase = _run_operation(snake_case_ , snake_case_ , *snake_case_ ) _lowerCAmelCase , _lowerCAmelCase = _run_operation(snake_case_ , snake_case_ , *snake_case_ ) assert my_res == py_res assert str(snake_case_ ) == str(snake_case_ ) assert set(snake_case_ ) == set(snake_case_ ) assert len(snake_case_ ) == len(snake_case_ ) assert set(my.items() ) == set(py.items() ) def __UpperCAmelCase ( ) -> Tuple: """simple docstring""" def is_public(snake_case_ : str ) -> bool: return not name.startswith("""_""" ) _lowerCAmelCase = {name for name in dir({} ) if is_public(snake_case_ )} _lowerCAmelCase = {name for name in dir(HashMap() ) if is_public(snake_case_ )} assert dict_public_names > hash_public_names
317
1
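The pytest sample in this row drives a custom HashMap and a plain dict through the same operation lists and asserts identical observable behavior. That differential-testing pattern works for any mapping; a compact, self-contained version follows, with a trivial wrapper standing in for the implementation under test.

from operator import delitem, getitem, setitem

class DictWrapper:  # stand-in for the mapping implementation under test
    def __init__(self):
        self._d = {}
    def __getitem__(self, k): return self._d[k]
    def __setitem__(self, k, v): self._d[k] = v
    def __delitem__(self, k): del self._d[k]

def run(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e

ops = [(setitem, "a", 1), (getitem, "a"), (delitem, "a"), (getitem, "a")]
mine, ref = DictWrapper(), {}
for fun, *args in ops:
    my_res, my_exc = run(mine, fun, *args)
    py_res, py_exc = run(ref, fun, *args)
    assert my_res == py_res and type(my_exc) is type(py_exc)
print("behaviour matches dict")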
"""simple docstring""" SCREAMING_SNAKE_CASE : Any = 0 # The first color of the flag. SCREAMING_SNAKE_CASE : Optional[int] = 1 # The second color of the flag. SCREAMING_SNAKE_CASE : List[str] = 2 # The third color of the flag. SCREAMING_SNAKE_CASE : Optional[Any] = (red, white, blue) def __UpperCAmelCase ( snake_case_ : list ) -> list: """simple docstring""" if not sequence: return [] if len(snake_case_ ) == 1: return list(snake_case_ ) _lowerCAmelCase = 0 _lowerCAmelCase = len(snake_case_ ) - 1 _lowerCAmelCase = 0 while mid <= high: if sequence[mid] == colors[0]: _lowerCAmelCase , _lowerCAmelCase = sequence[mid], sequence[low] low += 1 mid += 1 elif sequence[mid] == colors[1]: mid += 1 elif sequence[mid] == colors[2]: _lowerCAmelCase , _lowerCAmelCase = sequence[high], sequence[mid] high -= 1 else: _lowerCAmelCase = F"""The elements inside the sequence must contains only {colors} values""" raise ValueError(snake_case_ ) return sequence if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE : int = input('''Enter numbers separated by commas:\n''').strip() SCREAMING_SNAKE_CASE : Any = [int(item.strip()) for item in user_input.split(''',''')] print(F'{dutch_national_flag_sort(unsorted)}')
317
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int: """simple docstring""" def count_of_possible_combinations(snake_case_ : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(snake_case_ ) def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int: """simple docstring""" def count_of_possible_combinations_with_dp_array( snake_case_ : int , snake_case_ : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] _lowerCAmelCase = sum( count_of_possible_combinations_with_dp_array(target - item , snake_case_ ) for item in array ) _lowerCAmelCase = answer return answer _lowerCAmelCase = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(snake_case_ , snake_case_ ) def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int: """simple docstring""" _lowerCAmelCase = [0] * (target + 1) _lowerCAmelCase = 1 for i in range(1 , target + 1 ): for j in range(snake_case_ ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE : Tuple = 3 SCREAMING_SNAKE_CASE : Any = 5 SCREAMING_SNAKE_CASE : Optional[int] = [1, 2, 5] print(combination_sum_iv(n, array, target))
317
1
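The combination-sum snippet above hand-rolls a dp_array for memoization. functools.lru_cache gives the same top-down count with less bookkeeping; a sketch follows, counting ordered sequences exactly like the original.

from functools import lru_cache

def combination_count(array: tuple[int, ...], target: int) -> int:
    @lru_cache(maxsize=None)
    def count(remaining: int) -> int:
        if remaining < 0:
            return 0
        if remaining == 0:
            return 1
        return sum(count(remaining - item) for item in array)

    return count(target)

print(combination_count((1, 2, 5), 5))  # 9 ordered ways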
"""simple docstring""" from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__) @add_end_docstrings(__lowercase ) class __lowerCamelCase ( __lowercase ): def __init__(self , *lowerCamelCase , **lowerCamelCase ): '''simple docstring''' super().__init__(*lowerCamelCase , **lowerCamelCase ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def A__ (self , lowerCamelCase=None ): '''simple docstring''' _lowerCAmelCase = {} if top_k is not None: _lowerCAmelCase = top_k return {}, {}, postprocess_params def __call__(self , lowerCamelCase , **lowerCamelCase ): '''simple docstring''' return super().__call__(lowerCamelCase , **lowerCamelCase ) def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = load_image(lowerCamelCase ) _lowerCAmelCase = self.image_processor(images=lowerCamelCase , return_tensors=self.framework ) return model_inputs def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.model(**lowerCamelCase ) return model_outputs def A__ (self , lowerCamelCase , lowerCamelCase=5 ): '''simple docstring''' if top_k > self.model.config.num_labels: _lowerCAmelCase = self.model.config.num_labels if self.framework == "pt": _lowerCAmelCase = model_outputs.logits.softmax(-1 )[0] _lowerCAmelCase , _lowerCAmelCase = probs.topk(lowerCamelCase ) elif self.framework == "tf": _lowerCAmelCase = stable_softmax(model_outputs.logits , axis=-1 )[0] _lowerCAmelCase = tf.math.top_k(lowerCamelCase , k=lowerCamelCase ) _lowerCAmelCase , _lowerCAmelCase = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) _lowerCAmelCase = scores.tolist() _lowerCAmelCase = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCamelCase , lowerCamelCase )]
317
"""simple docstring""" from __future__ import annotations import string from itertools import cycle, product from pathlib import Path SCREAMING_SNAKE_CASE : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) SCREAMING_SNAKE_CASE : list[int] = [ord(letter) for letter in string.ascii_lowercase] SCREAMING_SNAKE_CASE : set[int] = {ord(char) for char in VALID_CHARS} SCREAMING_SNAKE_CASE : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def __UpperCAmelCase ( snake_case_ : list[int] , snake_case_ : tuple[int, ...] ) -> str | None: """simple docstring""" _lowerCAmelCase = "" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 for keychar, cipherchar in zip(cycle(snake_case_ ) , snake_case_ ): _lowerCAmelCase = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(snake_case_ ) return decoded def __UpperCAmelCase ( snake_case_ : list[int] ) -> list[str]: """simple docstring""" _lowerCAmelCase = [] for key in product(snake_case_ , repeat=3 ): _lowerCAmelCase = try_key(snake_case_ , snake_case_ ) if encoded is not None: possibles.append(snake_case_ ) return possibles def __UpperCAmelCase ( snake_case_ : list[str] , snake_case_ : str ) -> list[str]: """simple docstring""" return [possible for possible in possibles if common_word in possible.lower()] def __UpperCAmelCase ( snake_case_ : str = "p059_cipher.txt" ) -> int: """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = Path(snake_case_ ).parent.joinpath(snake_case_ ).read_text(encoding="""utf-8""" ) _lowerCAmelCase = [int(snake_case_ ) for number in data.strip().split(""",""" )] _lowerCAmelCase = filter_valid_chars(snake_case_ ) for common_word in COMMON_WORDS: _lowerCAmelCase = filter_common_word(snake_case_ , snake_case_ ) if len(snake_case_ ) == 1: break _lowerCAmelCase = possibles[0] return sum(ord(snake_case_ ) for char in decoded_text ) if __name__ == "__main__": print(F'{solution() = }')
317
1
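The Project Euler 59 snippet above brute-forces a three-letter repeating XOR key. The underlying cipher is symmetric, so applying the same function twice restores the input; a minimal round-trip sketch:

from itertools import cycle

def xor_cipher(data: bytes, key: bytes) -> bytes:
    # XOR with a repeating key; encryption and decryption are the same operation
    return bytes(b ^ k for b, k in zip(data, cycle(key)))

plaintext = b"the quick brown fox"
key = b"god"
ciphertext = xor_cipher(plaintext, key)
assert xor_cipher(ciphertext, key) == plaintext
print(ciphertext.hex())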
"""simple docstring""" import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class __lowerCamelCase : def A__ (self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) _lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) _lowerCAmelCase = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ """ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D""", ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) _lowerCAmelCase = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , ) torch.manual_seed(0 ) _lowerCAmelCase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def A__ (self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) _lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) _lowerCAmelCase = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ """ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D""", ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.414 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) _lowerCAmelCase = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , ) torch.manual_seed(0 ) _lowerCAmelCase = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , ) torch.manual_seed(0 ) 
_lowerCAmelCase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = self.pipeline_class(**lowerCamelCase ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) _lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase ) _lowerCAmelCase = inputs["""prompt"""] _lowerCAmelCase = inputs["""generator"""] _lowerCAmelCase = inputs["""num_inference_steps"""] _lowerCAmelCase = inputs["""output_type"""] if "image" in inputs: _lowerCAmelCase = inputs["""image"""] else: _lowerCAmelCase = None if "mask_image" in inputs: _lowerCAmelCase = inputs["""mask_image"""] else: _lowerCAmelCase = None if "original_image" in inputs: _lowerCAmelCase = inputs["""original_image"""] else: _lowerCAmelCase = None _lowerCAmelCase , _lowerCAmelCase = pipe.encode_prompt(lowerCamelCase ) # inputs with prompt converted to embeddings _lowerCAmelCase = { """prompt_embeds""": prompt_embeds, """negative_prompt_embeds""": negative_prompt_embeds, """generator""": generator, """num_inference_steps""": num_inference_steps, """output_type""": output_type, } if image is not None: _lowerCAmelCase = image if mask_image is not None: _lowerCAmelCase = mask_image if original_image is not None: _lowerCAmelCase = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(lowerCamelCase , lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = pipe(**lowerCamelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(lowerCamelCase ) _lowerCAmelCase = self.pipeline_class.from_pretrained(lowerCamelCase ) pipe_loaded.to(lowerCamelCase ) pipe_loaded.set_progress_bar_config(disable=lowerCamelCase ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(lowerCamelCase , lowerCamelCase ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , ) _lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase ) _lowerCAmelCase = inputs["""generator"""] _lowerCAmelCase = inputs["""num_inference_steps"""] _lowerCAmelCase = inputs["""output_type"""] # inputs with prompt converted to embeddings _lowerCAmelCase = { """prompt_embeds""": prompt_embeds, """negative_prompt_embeds""": negative_prompt_embeds, """generator""": generator, """num_inference_steps""": num_inference_steps, """output_type""": output_type, } if image is not None: _lowerCAmelCase = image if mask_image is not None: _lowerCAmelCase = mask_image if original_image is not None: _lowerCAmelCase = original_image _lowerCAmelCase = pipe_loaded(**lowerCamelCase )[0] _lowerCAmelCase = np.abs(to_np(lowerCamelCase ) - to_np(lowerCamelCase ) ).max() self.assertLess(lowerCamelCase , 1e-4 ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = self.pipeline_class(**lowerCamelCase ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) _lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase ) _lowerCAmelCase = pipe(**lowerCamelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(lowerCamelCase ) _lowerCAmelCase = self.pipeline_class.from_pretrained(lowerCamelCase ) 
pipe_loaded.to(lowerCamelCase ) pipe_loaded.set_progress_bar_config(disable=lowerCamelCase ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests _lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase ) _lowerCAmelCase = pipe_loaded(**lowerCamelCase )[0] _lowerCAmelCase = np.abs(to_np(lowerCamelCase ) - to_np(lowerCamelCase ) ).max() self.assertLess(lowerCamelCase , 1e-4 )
317
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : int = 1000000 ) -> int: """simple docstring""" _lowerCAmelCase = limit + 1 _lowerCAmelCase = [0] * limit for first_term in range(1 , snake_case_ ): for n in range(snake_case_ , snake_case_ , snake_case_ ): _lowerCAmelCase = first_term + n / first_term if common_difference % 4: # d must be divisble by 4 continue else: common_difference /= 4 if ( first_term > common_difference and first_term < 4 * common_difference ): # since x,y,z are positive integers frequency[n] += 1 # so z>0 and a>d ,also 4d<a _lowerCAmelCase = sum(1 for x in frequency[1:limit] if x == 10 ) return count if __name__ == "__main__": print(F'{solution() = }')
317
1
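The arithmetic-progression snippet in this row counts solutions of x^2 - y^2 - z^2 = n with x, y, z in arithmetic progression. Writing x = y + d and z = y - d gives n = 4yd - y^2 = y(4d - y), which is why the code requires (y + n/y) to be divisible by 4 and d < y < 4d. A direct brute force over (y, d) confirms the per-n counts on a small limit; a sketch:

def solution_counts(limit: int) -> list[int]:
    # frequency[n] = number of (y, d) with n = y * (4*d - y) and z = y - d > 0
    frequency = [0] * limit
    for y in range(1, limit):
        for d in range(y // 4 + 1, y):  # enforce d < y < 4*d
            n = y * (4 * d - y)
            if n >= limit:
                break  # n grows with d, so later d only give larger n
            frequency[n] += 1
    return frequency

freq = solution_counts(2000)
print(sum(1 for f in freq[1:] if f == 10))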
"""simple docstring""" import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType SCREAMING_SNAKE_CASE : Optional[List[str]] = None SCREAMING_SNAKE_CASE : Tuple = '''<''' if sys.byteorder == '''little''' else '''>''' # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image SCREAMING_SNAKE_CASE : List[str] = [ np.dtype('''|b1'''), np.dtype('''|u1'''), np.dtype('''<u2'''), np.dtype('''>u2'''), np.dtype('''<i2'''), np.dtype('''>i2'''), np.dtype('''<u4'''), np.dtype('''>u4'''), np.dtype('''<i4'''), np.dtype('''>i4'''), np.dtype('''<f4'''), np.dtype('''>f4'''), np.dtype('''<f8'''), np.dtype('''>f8'''), ] @dataclass class __lowerCamelCase : __UpperCamelCase = True __UpperCamelCase = None # Automatically constructed __UpperCamelCase = "PIL.Image.Image" __UpperCamelCase = pa.struct({'bytes': pa.binary(), 'path': pa.string()} ) __UpperCamelCase = field(default='Image' , init=__lowercase , repr=__lowercase ) def __call__(self ): '''simple docstring''' return self.pa_type def A__ (self , lowerCamelCase ): '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) if isinstance(lowerCamelCase , lowerCamelCase ): _lowerCAmelCase = np.array(lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ): return {"path": value, "bytes": None} elif isinstance(lowerCamelCase , lowerCamelCase ): return {"path": None, "bytes": value} elif isinstance(lowerCamelCase , np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(lowerCamelCase ) elif isinstance(lowerCamelCase , PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(lowerCamelCase ) elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("""path""" )} elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )} else: raise ValueError( f"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" ) def A__ (self , lowerCamelCase , lowerCamelCase=None ): '''simple docstring''' if not self.decode: raise RuntimeError("""Decoding is disabled for this feature. 
Please use Image(decode=True) instead.""" ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support decoding images, please install 'Pillow'.""" ) if token_per_repo_id is None: _lowerCAmelCase = {} _lowerCAmelCase , _lowerCAmelCase = value["""path"""], value["""bytes"""] if bytes_ is None: if path is None: raise ValueError(f"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" ) else: if is_local_path(lowerCamelCase ): _lowerCAmelCase = PIL.Image.open(lowerCamelCase ) else: _lowerCAmelCase = path.split("""::""" )[-1] try: _lowerCAmelCase = string_to_dict(lowerCamelCase , config.HUB_DATASETS_URL )["""repo_id"""] _lowerCAmelCase = token_per_repo_id.get(lowerCamelCase ) except ValueError: _lowerCAmelCase = None with xopen(lowerCamelCase , """rb""" , use_auth_token=lowerCamelCase ) as f: _lowerCAmelCase = BytesIO(f.read() ) _lowerCAmelCase = PIL.Image.open(bytes_ ) else: _lowerCAmelCase = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def A__ (self ): '''simple docstring''' from .features import Value return ( self if self.decode else { "bytes": Value("""binary""" ), "path": Value("""string""" ), } ) def A__ (self , lowerCamelCase ): '''simple docstring''' if pa.types.is_string(storage.type ): _lowerCAmelCase = pa.array([None] * len(lowerCamelCase ) , type=pa.binary() ) _lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): _lowerCAmelCase = pa.array([None] * len(lowerCamelCase ) , type=pa.string() ) _lowerCAmelCase = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("""bytes""" ) >= 0: _lowerCAmelCase = storage.field("""bytes""" ) else: _lowerCAmelCase = pa.array([None] * len(lowerCamelCase ) , type=pa.binary() ) if storage.type.get_field_index("""path""" ) >= 0: _lowerCAmelCase = storage.field("""path""" ) else: _lowerCAmelCase = pa.array([None] * len(lowerCamelCase ) , type=pa.string() ) _lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_list(storage.type ): _lowerCAmelCase = pa.array( [encode_np_array(np.array(lowerCamelCase ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) _lowerCAmelCase = pa.array([None] * len(lowerCamelCase ) , type=pa.string() ) _lowerCAmelCase = pa.StructArray.from_arrays( [bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() ) return array_cast(lowerCamelCase , self.pa_type ) def A__ (self , lowerCamelCase ): '''simple docstring''' @no_op_if_value_is_null def path_to_bytes(lowerCamelCase ): with xopen(lowerCamelCase , """rb""" ) as f: _lowerCAmelCase = f.read() return bytes_ _lowerCAmelCase = pa.array( [ (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) _lowerCAmelCase = pa.array( [os.path.basename(lowerCamelCase ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , ) _lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() ) return array_cast(lowerCamelCase , self.pa_type ) def __UpperCAmelCase ( ) -> List[str]: """simple docstring""" if 
config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() _lowerCAmelCase = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def __UpperCAmelCase ( snake_case_ : "PIL.Image.Image" ) -> bytes: """simple docstring""" _lowerCAmelCase = BytesIO() if image.format in list_image_compression_formats(): _lowerCAmelCase = image.format else: _lowerCAmelCase = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF""" image.save(snake_case_ , format=snake_case_ ) return buffer.getvalue() def __UpperCAmelCase ( snake_case_ : "PIL.Image.Image" ) -> dict: """simple docstring""" if hasattr(snake_case_ , """filename""" ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(snake_case_ )} def __UpperCAmelCase ( snake_case_ : np.ndarray ) -> dict: """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) _lowerCAmelCase = array.dtype _lowerCAmelCase = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER _lowerCAmelCase = dtype.kind _lowerCAmelCase = dtype.itemsize _lowerCAmelCase = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: _lowerCAmelCase = np.dtype("""|u1""" ) if dtype_kind not in ["u", "i"]: raise TypeError( F"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" ) if dtype is not dest_dtype: warnings.warn(F"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: _lowerCAmelCase = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: _lowerCAmelCase = dtype_byteorder + dtype_kind + str(snake_case_ ) _lowerCAmelCase = np.dtype(snake_case_ ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(F"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( F"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" ) _lowerCAmelCase = PIL.Image.fromarray(array.astype(snake_case_ ) ) return {"path": None, "bytes": image_to_bytes(snake_case_ )} def __UpperCAmelCase ( snake_case_ : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]: """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) if objs: _lowerCAmelCase , _lowerCAmelCase = first_non_null_value(snake_case_ ) if isinstance(snake_case_ , snake_case_ ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(snake_case_ , np.ndarray ): _lowerCAmelCase = no_op_if_value_is_null(snake_case_ ) return [obj_to_image_dict_func(snake_case_ ) for obj in objs] elif isinstance(snake_case_ , PIL.Image.Image ): _lowerCAmelCase = no_op_if_value_is_null(snake_case_ ) return [obj_to_image_dict_func(snake_case_ ) for obj in objs] else: return objs else: return objs
317
"""simple docstring""" from functools import reduce SCREAMING_SNAKE_CASE : int = ( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715693290963295227443043557''' '''66896648950445244523161731856403098711121722383113''' '''62229893423380308135336276614282806444486645238749''' '''30358907296290491560440772390713810515859307960866''' '''70172427121883998797908792274921901699720888093776''' '''65727333001053367881220235421809751254540594752243''' '''52584907711670556013604839586446706324415722155397''' '''53697817977846174064955149290862569321978468622482''' '''83972241375657056057490261407972968652414535100474''' '''82166370484403199890008895243450658541227588666881''' '''16427171479924442928230863465674813919123162824586''' '''17866458359124566529476545682848912883142607690042''' '''24219022671055626321111109370544217506941658960408''' '''07198403850962455444362981230987879927244284909188''' '''84580156166097919133875499200524063689912560717606''' '''05886116467109405077541002256983155200055935729725''' '''71636269561882670428252483600823257530420752963450''' ) def __UpperCAmelCase ( snake_case_ : str = N ) -> int: """simple docstring""" return max( # mypy cannot properly interpret reduce int(reduce(lambda snake_case_ , snake_case_ : str(int(snake_case_ ) * int(snake_case_ ) ) , n[i : i + 13] ) ) for i in range(len(snake_case_ ) - 12 ) ) if __name__ == "__main__": print(F'{solution() = }')
317
1
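The digit-product snippet above multiplies each 13-character window through a string-typed reduce. math.prod over integer windows is the more direct formulation and makes a quick cross-check easy; a sketch, using a short hypothetical input and a narrower window so the demo has valid windows at all:

from math import prod

def largest_window_product(digits: str, width: int = 13) -> int:
    values = [int(ch) for ch in digits]
    return max(prod(values[i : i + width]) for i in range(len(values) - width + 1))

print(largest_window_product("3675356291", width=4))  # 630, from the window 3*6*7*5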
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __lowerCamelCase ( __lowercase ): __UpperCamelCase = ['image_processor', 'tokenizer'] __UpperCamelCase = 'AutoImageProcessor' __UpperCamelCase = 'AutoTokenizer' def __init__(self , lowerCamelCase , lowerCamelCase ): '''simple docstring''' super().__init__(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = self.image_processor def __call__(self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , **lowerCamelCase ): '''simple docstring''' if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: _lowerCAmelCase = self.tokenizer(lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase ) if images is not None: _lowerCAmelCase = self.image_processor(lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase ) if text is not None and images is not None: _lowerCAmelCase = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowerCamelCase ) , tensor_type=lowerCamelCase ) def A__ (self , *lowerCamelCase , **lowerCamelCase ): '''simple docstring''' return self.tokenizer.batch_decode(*lowerCamelCase , **lowerCamelCase ) def A__ (self , *lowerCamelCase , **lowerCamelCase ): '''simple docstring''' return self.tokenizer.decode(*lowerCamelCase , **lowerCamelCase ) @property def A__ (self ): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
317
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : int = 600851475143 ) -> int: """simple docstring""" try: _lowerCAmelCase = int(snake_case_ ) except (TypeError, ValueError): raise TypeError("""Parameter n must be int or castable to int.""" ) if n <= 0: raise ValueError("""Parameter n must be greater than or equal to one.""" ) _lowerCAmelCase = 1 _lowerCAmelCase = 2 while i * i <= n: while n % i == 0: _lowerCAmelCase = i n //= i i += 1 if n > 1: _lowerCAmelCase = n return int(snake_case_ ) if __name__ == "__main__": print(F'{solution() = }')
317
1
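The trial-division snippet in this row strips each factor completely before advancing, so the loop variable only ever lands on primes and the leftover n (if greater than 1) is itself prime. A quick property check against a naive primality test makes that invariant explicit; a self-contained sketch:

def largest_prime_factor(n: int) -> int:
    ans, i = 1, 2
    while i * i <= n:
        while n % i == 0:  # strip the factor i entirely before moving on
            ans, n = i, n // i
        i += 1
    return n if n > 1 else ans

def is_prime(k: int) -> bool:
    return k > 1 and all(k % d for d in range(2, int(k**0.5) + 1))

for value in (13195, 600851475143, 97):
    factor = largest_prime_factor(value)
    assert is_prime(factor) and value % factor == 0
    print(value, "->", factor)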
"""simple docstring""" import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() SCREAMING_SNAKE_CASE : List[str] = logging.get_logger('''transformers.models.speecht5''') SCREAMING_SNAKE_CASE : List[str] = { '''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''', '''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''', '''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''', '''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''', } SCREAMING_SNAKE_CASE : Optional[Any] = { '''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''', '''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''', } SCREAMING_SNAKE_CASE : Optional[int] = { '''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''', '''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''', '''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''', '''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''', '''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''', } SCREAMING_SNAKE_CASE : Optional[Any] = { '''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''', '''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''', '''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''', '''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''', '''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''', '''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''', '''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''', '''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''', '''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''', '''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''', '''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''', '''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''', } SCREAMING_SNAKE_CASE : Optional[Any] = { '''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''', } SCREAMING_SNAKE_CASE : Tuple = { '''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''', } SCREAMING_SNAKE_CASE : Optional[Any] = { '''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''', '''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''', '''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''', '''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''', '''encoder.layers.*.self_attn_layer_norm''': 
'''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''', '''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''', '''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''', '''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''', '''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''', } SCREAMING_SNAKE_CASE : List[Any] = { '''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''', '''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''', '''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''', '''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''', '''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''', '''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''', '''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''', '''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''', '''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''', '''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''', '''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''', '''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''', '''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''', } SCREAMING_SNAKE_CASE : str = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } SCREAMING_SNAKE_CASE : Tuple = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } SCREAMING_SNAKE_CASE : Dict = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } SCREAMING_SNAKE_CASE : Optional[Any] = [] SCREAMING_SNAKE_CASE : Union[str, Any] = [ '''encoder.version''', '''encoder.layers.*.norm_k.weight''', '''encoder.layers.*.norm_k.bias''', '''decoder.version''', '''decoder.layers.*.norm_k.weight''', '''decoder.layers.*.norm_k.bias''', '''decoder.pos_emb.pe_k''', '''speech_encoder_prenet.embed_positions._float_tensor''', '''text_decoder_prenet.embed_positions._float_tensor''', ] SCREAMING_SNAKE_CASE : List[Any] = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''speech_decoder_prenet.*''', '''speech_decoder_postnet.*''', ] SCREAMING_SNAKE_CASE : int = IGNORE_KEYS + [ '''encoder.proj''', '''speech_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] SCREAMING_SNAKE_CASE : List[Any] = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] def __UpperCAmelCase ( snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : int , 
snake_case_ : Any , snake_case_ : Tuple ) -> List[str]: """simple docstring""" for attribute in key.split(""".""" ): _lowerCAmelCase = getattr(snake_case_ , snake_case_ ) if weight_type is not None: _lowerCAmelCase = getattr(snake_case_ , snake_case_ ).shape else: _lowerCAmelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": _lowerCAmelCase = value elif weight_type == "weight_g": _lowerCAmelCase = value elif weight_type == "weight_v": _lowerCAmelCase = value elif weight_type == "bias": _lowerCAmelCase = value elif weight_type == "running_mean": _lowerCAmelCase = value elif weight_type == "running_var": _lowerCAmelCase = value elif weight_type == "num_batches_tracked": _lowerCAmelCase = value else: _lowerCAmelCase = value logger.info(F"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" ) def __UpperCAmelCase ( snake_case_ : List[str] , snake_case_ : str ) -> Any: """simple docstring""" for key in ignore_keys: if key.endswith(""".*""" ): if name.startswith(key[:-1] ): return True elif ".*." in key: _lowerCAmelCase , _lowerCAmelCase = key.split(""".*.""" ) if prefix in name and suffix in name: return True elif key in name: return True return False def __UpperCAmelCase ( snake_case_ : int , snake_case_ : Union[str, Any] , snake_case_ : List[Any] ) -> List[str]: """simple docstring""" _lowerCAmelCase = [] if task == "s2t": _lowerCAmelCase = hf_model.speechta.encoder.prenet.feature_encoder _lowerCAmelCase = MAPPING_S2T _lowerCAmelCase = IGNORE_KEYS_S2T elif task == "t2s": _lowerCAmelCase = None _lowerCAmelCase = MAPPING_T2S _lowerCAmelCase = IGNORE_KEYS_T2S elif task == "s2s": _lowerCAmelCase = hf_model.speechta.encoder.prenet.feature_encoder _lowerCAmelCase = MAPPING_S2S _lowerCAmelCase = IGNORE_KEYS_S2S else: raise ValueError(F"""Unsupported task: {task}""" ) for name, value in fairseq_dict.items(): if should_ignore(snake_case_ , snake_case_ ): logger.info(F"""{name} was ignored""" ) continue _lowerCAmelCase = False if "conv_layers" in name: load_conv_layer( snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == """group""" , ) _lowerCAmelCase = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." 
+ mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: _lowerCAmelCase , _lowerCAmelCase = key.split(""".*.""" ) if prefix in name and suffix in name: _lowerCAmelCase = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: _lowerCAmelCase = True if "*" in mapped_key: _lowerCAmelCase = name.split(snake_case_ )[0].split(""".""" )[-2] _lowerCAmelCase = mapped_key.replace("""*""" , snake_case_ ) if "weight_g" in name: _lowerCAmelCase = """weight_g""" elif "weight_v" in name: _lowerCAmelCase = """weight_v""" elif "bias" in name: _lowerCAmelCase = """bias""" elif "weight" in name: _lowerCAmelCase = """weight""" elif "running_mean" in name: _lowerCAmelCase = """running_mean""" elif "running_var" in name: _lowerCAmelCase = """running_var""" elif "num_batches_tracked" in name: _lowerCAmelCase = """num_batches_tracked""" else: _lowerCAmelCase = None set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) continue if not is_used: unused_weights.append(snake_case_ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def __UpperCAmelCase ( snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : Tuple , snake_case_ : List[str] , snake_case_ : Dict ) -> int: """simple docstring""" _lowerCAmelCase = full_name.split("""conv_layers.""" )[-1] _lowerCAmelCase = name.split(""".""" ) _lowerCAmelCase = int(items[0] ) _lowerCAmelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _lowerCAmelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _lowerCAmelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) _lowerCAmelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) _lowerCAmelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(snake_case_ ) @torch.no_grad() def __UpperCAmelCase ( snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : str=None , snake_case_ : Union[str, Any]=None , snake_case_ : str=None , ) -> Union[str, Any]: """simple docstring""" if config_path is not None: _lowerCAmelCase = SpeechTaConfig.from_pretrained(snake_case_ ) else: _lowerCAmelCase = SpeechTaConfig() if task == "s2t": 
_lowerCAmelCase = config.max_text_positions _lowerCAmelCase = SpeechTaForSpeechToText(snake_case_ ) elif task == "t2s": _lowerCAmelCase = 1876 _lowerCAmelCase = 600 _lowerCAmelCase = config.max_speech_positions _lowerCAmelCase = SpeechTaForTextToSpeech(snake_case_ ) elif task == "s2s": _lowerCAmelCase = 1876 _lowerCAmelCase = config.max_speech_positions _lowerCAmelCase = SpeechTaForSpeechToSpeech(snake_case_ ) else: raise ValueError(F"""Unknown task name: {task}""" ) if vocab_path: _lowerCAmelCase = SpeechTaTokenizer(snake_case_ , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it _lowerCAmelCase = AddedToken("""<mask>""" , lstrip=snake_case_ , rstrip=snake_case_ ) _lowerCAmelCase = mask_token tokenizer.add_special_tokens({"""mask_token""": mask_token} ) tokenizer.add_tokens(["""<ctc_blank>"""] ) _lowerCAmelCase = SpeechTaFeatureExtractor() _lowerCAmelCase = SpeechTaProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ ) processor.save_pretrained(snake_case_ ) _lowerCAmelCase = torch.load(snake_case_ ) recursively_load_weights(fairseq_checkpoint["""model"""] , snake_case_ , snake_case_ ) model.save_pretrained(snake_case_ ) if repo_id: print("""Pushing to the hub...""" ) processor.push_to_hub(snake_case_ ) model.push_to_hub(snake_case_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser() parser.add_argument( '''--task''', default='''s2t''', type=str, help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
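A sketch of the dotted-key traversal the conversion functions above rely on: each fairseq key such as `encoder.layers.0.fc1.weight` is resolved attribute-by-attribute against the HF model before the shape check. `set_by_dotted_key` is a hypothetical, simplified helper (the real code also dispatches on `weight_g`/`weight_v`/`bias` and other weight_type suffixes):

import torch


def set_by_dotted_key(model: torch.nn.Module, key: str, value: torch.Tensor) -> None:
    *parents, leaf = key.split(".")
    pointer = model
    for attr in parents:
        pointer = getattr(pointer, attr)  # walk e.g. speecht5 -> encoder -> layers -> 0
    param = getattr(pointer, leaf)
    if param.shape != value.shape:  # fail fast on mismatched checkpoints
        raise ValueError(f"{key}: expected tensor of shape {param.shape}, got {value.shape}")
    param.data.copy_(value)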
317
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__) @dataclass class __lowerCamelCase : __UpperCamelCase = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) __UpperCamelCase = field( default=__lowercase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) __UpperCamelCase = field( default=__lowercase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) __UpperCamelCase = field( default=__lowercase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) __UpperCamelCase = field(default=__lowercase , metadata={'help': 'Whether tp freeze the encoder.'} ) __UpperCamelCase = field(default=__lowercase , metadata={'help': 'Whether to freeze the embeddings.'} ) @dataclass class __lowerCamelCase : __UpperCamelCase = field( metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} ) __UpperCamelCase = field( default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , ) __UpperCamelCase = field( default=1_024 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __UpperCamelCase = field( default=128 , metadata={ 'help': ( 'The maximum total sequence length for target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __UpperCamelCase = field( default=142 , metadata={ 'help': ( 'The maximum total sequence length for validation target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded. ' 'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used ' 'during ``evaluate`` and ``predict``.' ) } , ) __UpperCamelCase = field( default=142 , metadata={ 'help': ( 'The maximum total sequence length for test target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __UpperCamelCase = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} ) __UpperCamelCase = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} ) __UpperCamelCase = field(default=-1 , metadata={'help': '# test examples. 
-1 means use all.'} ) __UpperCamelCase = field(default=__lowercase , metadata={'help': 'Source language id for translation.'} ) __UpperCamelCase = field(default=__lowercase , metadata={'help': 'Target language id for translation.'} ) __UpperCamelCase = field(default=__lowercase , metadata={'help': '# num_beams to use for evaluation.'} ) __UpperCamelCase = field( default=__lowercase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , ) def __UpperCAmelCase ( snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Union[str, Any] ) -> Tuple: """simple docstring""" logger.info(F"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(F""" {key} = {metrics[key]}""" ) save_json(snake_case_ , os.path.join(snake_case_ , F"""{split}_results.json""" ) ) def __UpperCAmelCase ( ) -> Union[str, Any]: """simple docstring""" _lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_args_into_dataclasses() check_output_dir(snake_case_ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("""Training/evaluation parameters %s""" , snake_case_ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_lowerCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _lowerCAmelCase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""") for p in extra_model_params: if getattr(snake_case_ , snake_case_ , snake_case_ ): assert hasattr(snake_case_ , snake_case_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) ) _lowerCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(snake_case_ , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: _lowerCAmelCase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(snake_case_ , snake_case_ ): _lowerCAmelCase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: _lowerCAmelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(snake_case_ ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) _lowerCAmelCase = SeqaSeqDataset # Get datasets _lowerCAmelCase = ( dataset_class( snake_case_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_train else None ) _lowerCAmelCase = ( dataset_class( snake_case_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) _lowerCAmelCase = ( dataset_class( snake_case_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_predict else None ) # Initialize our Trainer _lowerCAmelCase = ( build_compute_metrics_fn(data_args.task , snake_case_ ) if training_args.predict_with_generate else None ) _lowerCAmelCase = SeqaSeqTrainer( model=snake_case_ , args=snake_case_ , data_args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , data_collator=SeqaSeqDataCollator( snake_case_ , snake_case_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case_ , tokenizer=snake_case_ , ) _lowerCAmelCase = {} # Training if training_args.do_train: logger.info("""*** Train ***""" ) _lowerCAmelCase = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) _lowerCAmelCase = train_result.metrics _lowerCAmelCase = data_args.n_train 
trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("""train""" , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) _lowerCAmelCase = trainer.evaluate(metric_key_prefix="""val""" ) _lowerCAmelCase = data_args.n_val _lowerCAmelCase = round(metrics["""val_loss"""] , 4 ) if trainer.is_world_process_zero(): handle_metrics("""val""" , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) if training_args.do_predict: logger.info("""*** Predict ***""" ) _lowerCAmelCase = trainer.predict(test_dataset=snake_case_ , metric_key_prefix="""test""" ) _lowerCAmelCase = test_output.metrics _lowerCAmelCase = data_args.n_test if trainer.is_world_process_zero(): _lowerCAmelCase = round(metrics["""test_loss"""] , 4 ) handle_metrics("""test""" , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) if training_args.predict_with_generate: _lowerCAmelCase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ ) _lowerCAmelCase = lmap(str.strip , snake_case_ ) write_txt_file(snake_case_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) ) if trainer.is_world_process_zero(): save_json(snake_case_ , os.path.join(training_args.output_dir , """all_results.json""" ) ) return all_metrics def __UpperCAmelCase ( snake_case_ : Any ) -> Dict: """simple docstring""" main() if __name__ == "__main__": main()
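The `freeze_params` / `assert_all_frozen` helpers imported above are small `requires_grad` toggles over a module's parameters. A minimal sketch, assuming the helpers behave as their names suggest:

from torch import nn


def freeze_params(module: nn.Module) -> None:
    for param in module.parameters():
        param.requires_grad = False


def assert_all_frozen(module: nn.Module) -> None:
    n_trainable = sum(p.requires_grad for p in module.parameters())
    assert n_trainable == 0, f"{n_trainable} parameters are still trainable"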
317
1
"""simple docstring""" import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging SCREAMING_SNAKE_CASE : Optional[int] = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt'''] SCREAMING_SNAKE_CASE : int = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse('''0.9.0'''): raise Exception('''requires fairseq >= 0.9.0''') logging.set_verbosity_info() SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Any = ''' Hello world! cécé herlolip''' SCREAMING_SNAKE_CASE : Union[str, Any] = [ ('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''), ('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''), ('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''), ('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''), ] def __UpperCAmelCase ( snake_case_ : Any ) -> int: """simple docstring""" _lowerCAmelCase = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """_float_tensor""", ] for k in ignore_keys: state_dict.pop(snake_case_ , snake_case_ ) def __UpperCAmelCase ( snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : str ) -> List[str]: """simple docstring""" _lowerCAmelCase = dct.pop(snake_case_ ) _lowerCAmelCase = val def __UpperCAmelCase ( snake_case_ : List[Any] ) -> Dict: """simple docstring""" _lowerCAmelCase = torch.load(snake_case_ , map_location="""cpu""" ) _lowerCAmelCase = torch.hub.load("""pytorch/fairseq""" , """bart.large.cnn""" ).eval() hub_interface.model.load_state_dict(sd["""model"""] ) return hub_interface def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> List[str]: """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = emb.weight.shape _lowerCAmelCase = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ ) _lowerCAmelCase = emb.weight.data return lin_layer @torch.no_grad() def __UpperCAmelCase ( snake_case_ : Any , snake_case_ : Any , snake_case_ : int=None ) -> str: """simple docstring""" if not os.path.exists(snake_case_ ): _lowerCAmelCase = torch.hub.load("""pytorch/fairseq""" , snake_case_ ).eval() else: _lowerCAmelCase = load_xsum_checkpoint(snake_case_ ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: _lowerCAmelCase = checkpoint_path.replace(""".""" , """-""" ) _lowerCAmelCase = BartConfig.from_pretrained(snake_case_ ) _lowerCAmelCase = bart.encode(snake_case_ ).unsqueeze(0 ) _lowerCAmelCase = BartTokenizer.from_pretrained(snake_case_ ).encode(snake_case_ , return_tensors="""pt""" ).unsqueeze(0 ) if not torch.eq(snake_case_ , snake_case_ ).all(): raise ValueError( F"""converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}""" ) if checkpoint_path == "bart.large.mnli": _lowerCAmelCase = bart.state_dict() remove_ignore_keys_(snake_case_ ) _lowerCAmelCase = state_dict["""model.decoder.embed_tokens.weight"""] for src, dest in mnli_rename_keys: rename_key(snake_case_ , snake_case_ , snake_case_ ) _lowerCAmelCase = BartForSequenceClassification(snake_case_ ).eval() model.load_state_dict(snake_case_ 
) _lowerCAmelCase = bart.predict("""mnli""" , snake_case_ , return_logits=snake_case_ ) _lowerCAmelCase = model(snake_case_ )[0] # logits else: # no classification heads to worry about _lowerCAmelCase = bart.model.state_dict() remove_ignore_keys_(snake_case_ ) _lowerCAmelCase = state_dict["""decoder.embed_tokens.weight"""] _lowerCAmelCase = bart.extract_features(snake_case_ ) if hf_checkpoint_name == "facebook/bart-large": _lowerCAmelCase = BartModel(snake_case_ ).eval() model.load_state_dict(snake_case_ ) _lowerCAmelCase = model(snake_case_ ).model[0] else: _lowerCAmelCase = BartForConditionalGeneration(snake_case_ ).eval() # an existing summarization ckpt model.model.load_state_dict(snake_case_ ) if hasattr(snake_case_ , """lm_head""" ): _lowerCAmelCase = make_linear_from_emb(model.model.shared ) _lowerCAmelCase = model.model(snake_case_ )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( F"""`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}""" ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""" ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) model.save_pretrained(snake_case_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum''' ) SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
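`make_linear_from_emb` above builds the LM head by sharing the embedding weights. A short self-contained illustration of the tying trick — the linear layer shares storage with the embedding, so logits are just dot products against embedding rows:

import torch
from torch import nn

emb = nn.Embedding(10, 4)
lm_head = nn.Linear(4, 10, bias=False)
lm_head.weight.data = emb.weight.data  # share the same storage

hidden = torch.randn(1, 4)
logits = lm_head(hidden)
assert torch.allclose(logits, hidden @ emb.weight.t())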
317
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE : List[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Union[str, Any] = [ '''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FocalNetForImageClassification''', '''FocalNetForMaskedImageModeling''', '''FocalNetBackbone''', '''FocalNetModel''', '''FocalNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
317
1
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : List[Any] = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } def __UpperCAmelCase ( snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : Any , snake_case_ : List[str] , snake_case_ : int ) -> Tuple: """simple docstring""" for attribute in key.split(""".""" ): _lowerCAmelCase = getattr(snake_case_ , snake_case_ ) if weight_type is not None: _lowerCAmelCase = getattr(snake_case_ , snake_case_ ).shape else: _lowerCAmelCase = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": _lowerCAmelCase = value elif weight_type == "weight_g": _lowerCAmelCase = value elif weight_type == "weight_v": _lowerCAmelCase = value elif weight_type == "bias": _lowerCAmelCase = value else: _lowerCAmelCase = value logger.info(F"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def __UpperCAmelCase ( snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : str ) -> Optional[Any]: """simple docstring""" _lowerCAmelCase = [] _lowerCAmelCase = fairseq_model.state_dict() _lowerCAmelCase = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _lowerCAmelCase = False if "conv_layers" in name: load_conv_layer( snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == """group""" , ) _lowerCAmelCase = True else: for key, mapped_key in MAPPING.items(): _lowerCAmelCase = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned): _lowerCAmelCase = True if "*" in mapped_key: _lowerCAmelCase = name.split(snake_case_ )[0].split(""".""" )[-2] _lowerCAmelCase = mapped_key.replace("""*""" , snake_case_ ) if "weight_g" in name: _lowerCAmelCase = """weight_g""" elif "weight_v" in name: _lowerCAmelCase = """weight_v""" elif "weight" in name: _lowerCAmelCase = """weight""" elif "bias" in name: _lowerCAmelCase = """bias""" else: _lowerCAmelCase = None set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) continue if not is_used: unused_weights.append(snake_case_ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def __UpperCAmelCase ( snake_case_ : str , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : List[str] ) -> str: """simple docstring""" _lowerCAmelCase = full_name.split("""conv_layers.""" )[-1] _lowerCAmelCase = name.split(""".""" ) _lowerCAmelCase = int(items[0] ) _lowerCAmelCase = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _lowerCAmelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _lowerCAmelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) _lowerCAmelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) _lowerCAmelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(snake_case_ ) @torch.no_grad() def __UpperCAmelCase ( snake_case_ : List[str] , snake_case_ : Optional[Any] , snake_case_ : Optional[int]=None , snake_case_ : Tuple=None , snake_case_ : Any=True ) -> Any: """simple docstring""" if config_path is not None: _lowerCAmelCase = HubertConfig.from_pretrained(snake_case_ ) else: _lowerCAmelCase = HubertConfig() if is_finetuned: if dict_path: _lowerCAmelCase = Dictionary.load(snake_case_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _lowerCAmelCase = target_dict.pad_index _lowerCAmelCase = target_dict.bos_index _lowerCAmelCase = target_dict.eos_index _lowerCAmelCase = len(target_dict.symbols ) _lowerCAmelCase = os.path.join(snake_case_ , """vocab.json""" ) if not os.path.isdir(snake_case_ ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(snake_case_ ) ) return os.makedirs(snake_case_ , exist_ok=snake_case_ ) with open(snake_case_ , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(target_dict.indices , snake_case_ ) _lowerCAmelCase = WavaVecaCTCTokenizer( snake_case_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=snake_case_ , ) _lowerCAmelCase = True if config.feat_extract_norm == """layer""" else False _lowerCAmelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=snake_case_ , return_attention_mask=snake_case_ , ) _lowerCAmelCase = WavaVecaProcessor(feature_extractor=snake_case_ , tokenizer=snake_case_ ) processor.save_pretrained(snake_case_ ) _lowerCAmelCase = HubertForCTC(snake_case_ ) else: _lowerCAmelCase = HubertModel(snake_case_ ) if is_finetuned: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) _lowerCAmelCase = model[0].eval() recursively_load_weights(snake_case_ , snake_case_ , snake_case_ ) hf_wavavec.save_pretrained(snake_case_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) SCREAMING_SNAKE_CASE : List[str] = parser.parse_args() 
convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
317
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __lowerCamelCase ( unittest.TestCase ): def __init__(self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=18 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=None , ): '''simple docstring''' _lowerCAmelCase = size if size is not None else {"""shortest_edge""": 20} _lowerCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = num_channels _lowerCAmelCase = image_size _lowerCAmelCase = min_resolution _lowerCAmelCase = max_resolution _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = do_center_crop _lowerCAmelCase = crop_size def A__ (self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class __lowerCamelCase ( __lowercase , unittest.TestCase ): __UpperCamelCase = MobileNetVaImageProcessor if is_vision_available() else None def A__ (self ): '''simple docstring''' _lowerCAmelCase = MobileNetVaImageProcessingTester(self ) @property def A__ (self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase , """do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase , """size""" ) ) self.assertTrue(hasattr(lowerCamelCase , """do_center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase , """crop_size""" ) ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 20} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def A__ (self ): '''simple docstring''' pass def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , Image.Image ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, 
self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , np.ndarray ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , torch.Tensor ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
317
1
"""simple docstring""" import random from typing import Any def __UpperCAmelCase ( snake_case_ : list ) -> list[Any]: """simple docstring""" for _ in range(len(snake_case_ ) ): _lowerCAmelCase = random.randint(0 , len(snake_case_ ) - 1 ) _lowerCAmelCase = random.randint(0 , len(snake_case_ ) - 1 ) _lowerCAmelCase , _lowerCAmelCase = data[b], data[a] return data if __name__ == "__main__": SCREAMING_SNAKE_CASE : List[str] = [0, 1, 2, 3, 4, 5, 6, 7] SCREAMING_SNAKE_CASE : List[Any] = ['''python''', '''says''', '''hello''', '''!'''] print('''Fisher-Yates Shuffle:''') print('''List''', integers, strings) print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
317
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : list ) -> list: """simple docstring""" for i in range(len(snake_case_ ) - 1 , 0 , -1 ): _lowerCAmelCase = False for j in range(snake_case_ , 0 , -1 ): if unsorted[j] < unsorted[j - 1]: _lowerCAmelCase , _lowerCAmelCase = unsorted[j - 1], unsorted[j] _lowerCAmelCase = True for j in range(snake_case_ ): if unsorted[j] > unsorted[j + 1]: _lowerCAmelCase , _lowerCAmelCase = unsorted[j + 1], unsorted[j] _lowerCAmelCase = True if not swapped: break return unsorted if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE : List[Any] = input('''Enter numbers separated by a comma:\n''').strip() SCREAMING_SNAKE_CASE : List[str] = [int(item) for item in user_input.split(''',''')] print(F'{cocktail_shaker_sort(unsorted) = }')
317
1
"""simple docstring""" import numpy as np import qiskit def __UpperCAmelCase ( snake_case_ : int = 8 , snake_case_ : int | None = None ) -> str: """simple docstring""" _lowerCAmelCase = np.random.default_rng(seed=snake_case_ ) # Roughly 25% of the qubits will contribute to the key. # So we take more than we need. _lowerCAmelCase = 6 * key_len # Measurement basis for Alice's qubits. _lowerCAmelCase = rng.integers(2 , size=snake_case_ ) # The set of states Alice will prepare. _lowerCAmelCase = rng.integers(2 , size=snake_case_ ) # Measurement basis for Bob's qubits. _lowerCAmelCase = rng.integers(2 , size=snake_case_ ) # Quantum Circuit to simulate BB84 _lowerCAmelCase = qiskit.QuantumCircuit(snake_case_ , name="""BB84""" ) # Alice prepares her qubits according to rules above. for index, _ in enumerate(snake_case_ ): if alice_state[index] == 1: bbaa_circ.x(snake_case_ ) if alice_basis[index] == 1: bbaa_circ.h(snake_case_ ) bbaa_circ.barrier() # Bob measures the received qubits according to rules above. for index, _ in enumerate(snake_case_ ): if bob_basis[index] == 1: bbaa_circ.h(snake_case_ ) bbaa_circ.barrier() bbaa_circ.measure_all() # Simulate the quantum circuit. _lowerCAmelCase = qiskit.Aer.get_backend("""aer_simulator""" ) # We only need to run one shot because the key is unique. # Multiple shots will produce the same key. _lowerCAmelCase = qiskit.execute(snake_case_ , snake_case_ , shots=1 , seed_simulator=snake_case_ ) # Returns the result of measurement. _lowerCAmelCase = job.result().get_counts(snake_case_ ).most_frequent() # Extracting the generated key from the simulation results. # Only keep measurement results where Alice and Bob chose the same basis. _lowerCAmelCase = """""".join( [ result_bit for alice_basis_bit, bob_basis_bit, result_bit in zip( snake_case_ , snake_case_ , snake_case_ ) if alice_basis_bit == bob_basis_bit ] ) # Get final key. Pad with 0 if too short, otherwise truncate. _lowerCAmelCase = gen_key[:key_len] if len(snake_case_ ) >= key_len else gen_key.ljust(snake_case_ , """0""" ) return key if __name__ == "__main__": print(F'The generated key is : {bbaa(8, seed=0)}') from doctest import testmod testmod()
317
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) def __UpperCAmelCase ( snake_case_ : bool , snake_case_ : bool ) -> Tuple: """simple docstring""" def run_func(snake_case_ : Union[str, Any] ): @wraps(snake_case_ ) def run_in_eager_mode(*snake_case_ : Optional[int] , **snake_case_ : Union[str, Any] ): return func(*snake_case_ , **snake_case_ ) @wraps(snake_case_ ) @tf.function(experimental_compile=snake_case_ ) def run_in_graph_mode(*snake_case_ : Dict , **snake_case_ : Union[str, Any] ): return func(*snake_case_ , **snake_case_ ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> ["tf.Tensor"]: """simple docstring""" _lowerCAmelCase = random.Random() _lowerCAmelCase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(snake_case_ , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class __lowerCamelCase ( __lowercase ): __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = "TensorFlow" @property def A__ (self ): '''simple docstring''' return tf.__version__ def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_speed(_inference ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_speed(_train ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase ) _lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_memory(_inference ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase ) _lowerCAmelCase = self.args.strategy if strategy is None: raise 
ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_memory(_train ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _lowerCAmelCase = ( hasattr(lowerCamelCase , """architectures""" ) and isinstance(config.architectures , lowerCamelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = model_cls(lowerCamelCase ) except ImportError: raise ImportError( f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to""" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _lowerCAmelCase = TF_MODEL_MAPPING[config.__class__](lowerCamelCase ) # encoder-decoder has vocab size saved differently _lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size _lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(lowerCamelCase , decoder_input_ids=lowerCamelCase , training=lowerCamelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(lowerCamelCase , training=lowerCamelCase ) _lowerCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _lowerCAmelCase = ( hasattr(lowerCamelCase , """architectures""" ) and isinstance(config.architectures , lowerCamelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = model_cls(lowerCamelCase ) except ImportError: raise ImportError( f"""{model_class} does not exist. 
If you just want to test the pretrained model, you might want to""" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _lowerCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowerCamelCase ) # encoder-decoder has vocab size saved differently _lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size _lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): _lowerCAmelCase = model(lowerCamelCase , decoder_input_ids=lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0] _lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): _lowerCAmelCase = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0] _lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables ) return gradients _lowerCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def A__ (self , lowerCamelCase ): '''simple docstring''' with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(lowerCamelCase , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _lowerCAmelCase = timeit.repeat( lowerCamelCase , repeat=self.args.repeat , number=10 , ) return min(lowerCamelCase ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f"""Doesn't fit on GPU. {e}""" ) def A__ (self , lowerCamelCase ): '''simple docstring''' logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _lowerCAmelCase = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _lowerCAmelCase = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _lowerCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _lowerCAmelCase = nvml.nvmlDeviceGetMemoryInfo(lowerCamelCase ) _lowerCAmelCase = meminfo.used _lowerCAmelCase = Memory(lowerCamelCase ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _lowerCAmelCase = None else: _lowerCAmelCase = measure_peak_memory_cpu(lowerCamelCase ) _lowerCAmelCase = Memory(lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else memory_bytes if self.args.trace_memory_line_by_line: _lowerCAmelCase = stop_memory_tracing(lowerCamelCase ) if memory is None: _lowerCAmelCase = summary.total else: _lowerCAmelCase = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f"""Doesn't fit on GPU. {e}""" ) return "N/A", None
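# NOTE: a minimal, self-contained sketch (not part of the original benchmark) of the
# timing idiom used by the speed measurement above: `timeit.repeat` returns one total
# per repeat, and the minimum divided by `number` is the per-call estimate. All names
# below are illustrative, not taken from the benchmark itself.
import timeit


def _toy_func():
    return sum(i * i for i in range(1_000 ) )


_runtimes = timeit.repeat(_toy_func , repeat=3 , number=10 )
print(f"""fastest run: {min(_runtimes ) / 10.0:.6f} sec per call""" )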
317
1
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : int = 10**9 ) -> int: """simple docstring""" _lowerCAmelCase = 1 _lowerCAmelCase = 2 _lowerCAmelCase = 0 _lowerCAmelCase = 0 _lowerCAmelCase = 0 while perimeter <= max_perimeter: perimeters_sum += perimeter prev_value += 2 * value value += prev_value _lowerCAmelCase = 2 * value + 2 if i % 2 == 0 else 2 * value - 2 i += 1 return perimeters_sum if __name__ == "__main__": print(F'{solution() = }')
317
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Any = { '''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''', } class __lowerCamelCase ( __lowercase ): __UpperCamelCase = 'transfo-xl' __UpperCamelCase = ['mems'] __UpperCamelCase = { 'n_token': 'vocab_size', 'hidden_size': 'd_model', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__(self , lowerCamelCase=267_735 , lowerCamelCase=[20_000, 40_000, 200_000] , lowerCamelCase=1_024 , lowerCamelCase=1_024 , lowerCamelCase=16 , lowerCamelCase=64 , lowerCamelCase=4_096 , lowerCamelCase=4 , lowerCamelCase=False , lowerCamelCase=18 , lowerCamelCase=1_600 , lowerCamelCase=1_000 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=0 , lowerCamelCase=-1 , lowerCamelCase=True , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase="normal" , lowerCamelCase=0.01 , lowerCamelCase=0.01 , lowerCamelCase=0.02 , lowerCamelCase=1e-5 , lowerCamelCase=0 , **lowerCamelCase , ): '''simple docstring''' _lowerCAmelCase = vocab_size _lowerCAmelCase = [] self.cutoffs.extend(lowerCamelCase ) if proj_share_all_but_first: _lowerCAmelCase = [False] + [True] * len(self.cutoffs ) else: _lowerCAmelCase = [False] + [False] * len(self.cutoffs ) _lowerCAmelCase = d_model _lowerCAmelCase = d_embed _lowerCAmelCase = d_head _lowerCAmelCase = d_inner _lowerCAmelCase = div_val _lowerCAmelCase = pre_lnorm _lowerCAmelCase = n_layer _lowerCAmelCase = n_head _lowerCAmelCase = mem_len _lowerCAmelCase = same_length _lowerCAmelCase = attn_type _lowerCAmelCase = clamp_len _lowerCAmelCase = sample_softmax _lowerCAmelCase = adaptive _lowerCAmelCase = dropout _lowerCAmelCase = dropatt _lowerCAmelCase = untie_r _lowerCAmelCase = init _lowerCAmelCase = init_range _lowerCAmelCase = proj_init_std _lowerCAmelCase = init_std _lowerCAmelCase = layer_norm_epsilon super().__init__(eos_token_id=lowerCamelCase , **lowerCamelCase ) @property def A__ (self ): '''simple docstring''' logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) return -1 @max_position_embeddings.setter def A__ (self , lowerCamelCase ): '''simple docstring''' raise NotImplementedError( f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
317
1
"""simple docstring""" from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Optional[int] = { '''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''', } class __lowerCamelCase ( __lowercase ): __UpperCamelCase = 'efficientnet' def __init__(self , lowerCamelCase = 3 , lowerCamelCase = 600 , lowerCamelCase = 2.0 , lowerCamelCase = 3.1 , lowerCamelCase = 8 , lowerCamelCase = [3, 3, 5, 3, 5, 5, 3] , lowerCamelCase = [32, 16, 24, 40, 80, 112, 192] , lowerCamelCase = [16, 24, 40, 80, 112, 192, 320] , lowerCamelCase = [] , lowerCamelCase = [1, 2, 2, 2, 1, 2, 1] , lowerCamelCase = [1, 2, 2, 3, 3, 4, 1] , lowerCamelCase = [1, 6, 6, 6, 6, 6, 6] , lowerCamelCase = 0.25 , lowerCamelCase = "swish" , lowerCamelCase = 2_560 , lowerCamelCase = "mean" , lowerCamelCase = 0.02 , lowerCamelCase = 0.001 , lowerCamelCase = 0.99 , lowerCamelCase = 0.5 , lowerCamelCase = 0.2 , **lowerCamelCase , ): '''simple docstring''' super().__init__(**lowerCamelCase ) _lowerCAmelCase = num_channels _lowerCAmelCase = image_size _lowerCAmelCase = width_coefficient _lowerCAmelCase = depth_coefficient _lowerCAmelCase = depth_divisor _lowerCAmelCase = kernel_sizes _lowerCAmelCase = in_channels _lowerCAmelCase = out_channels _lowerCAmelCase = depthwise_padding _lowerCAmelCase = strides _lowerCAmelCase = num_block_repeats _lowerCAmelCase = expand_ratios _lowerCAmelCase = squeeze_expansion_ratio _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dim _lowerCAmelCase = pooling_type _lowerCAmelCase = initializer_range _lowerCAmelCase = batch_norm_eps _lowerCAmelCase = batch_norm_momentum _lowerCAmelCase = dropout_rate _lowerCAmelCase = drop_connect_rate _lowerCAmelCase = sum(lowerCamelCase ) * 4 class __lowerCamelCase ( __lowercase ): __UpperCamelCase = version.parse('1.11' ) @property def A__ (self ): '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def A__ (self ): '''simple docstring''' return 1e-5
317
"""simple docstring""" import math def __UpperCAmelCase ( snake_case_ : int ) -> list[int]: """simple docstring""" _lowerCAmelCase = [] _lowerCAmelCase = 2 _lowerCAmelCase = int(math.sqrt(snake_case_ ) ) # Size of every segment _lowerCAmelCase = [True] * (end + 1) _lowerCAmelCase = [] while start <= end: if temp[start] is True: in_prime.append(snake_case_ ) for i in range(start * start , end + 1 , snake_case_ ): _lowerCAmelCase = False start += 1 prime += in_prime _lowerCAmelCase = end + 1 _lowerCAmelCase = min(2 * end , snake_case_ ) while low <= n: _lowerCAmelCase = [True] * (high - low + 1) for each in in_prime: _lowerCAmelCase = math.floor(low / each ) * each if t < low: t += each for j in range(snake_case_ , high + 1 , snake_case_ ): _lowerCAmelCase = False for j in range(len(snake_case_ ) ): if temp[j] is True: prime.append(j + low ) _lowerCAmelCase = high + 1 _lowerCAmelCase = min(high + end , snake_case_ ) return prime print(sieve(1_0**6))
317
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : List[str] = {'''vocab_file''': '''vocab.txt'''} SCREAMING_SNAKE_CASE : Union[str, Any] = { '''vocab_file''': { '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''', '''YituTech/conv-bert-medium-small''': ( '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt''' ), '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''', } } SCREAMING_SNAKE_CASE : int = { '''YituTech/conv-bert-base''': 5_1_2, '''YituTech/conv-bert-medium-small''': 5_1_2, '''YituTech/conv-bert-small''': 5_1_2, } SCREAMING_SNAKE_CASE : Dict = { '''YituTech/conv-bert-base''': {'''do_lower_case''': True}, '''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True}, '''YituTech/conv-bert-small''': {'''do_lower_case''': True}, } class __lowerCamelCase ( __lowercase ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_INIT_CONFIGURATION __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ConvBertTokenizer def __init__(self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase="[UNK]" , lowerCamelCase="[SEP]" , lowerCamelCase="[PAD]" , lowerCamelCase="[CLS]" , lowerCamelCase="[MASK]" , lowerCamelCase=True , lowerCamelCase=None , **lowerCamelCase , ): '''simple docstring''' super().__init__( lowerCamelCase , tokenizer_file=lowerCamelCase , do_lower_case=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , pad_token=lowerCamelCase , cls_token=lowerCamelCase , mask_token=lowerCamelCase , tokenize_chinese_chars=lowerCamelCase , strip_accents=lowerCamelCase , **lowerCamelCase , ) _lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , lowerCamelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" , lowerCamelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , lowerCamelCase ) != tokenize_chinese_chars ): _lowerCAmelCase = getattr(lowerCamelCase , normalizer_state.pop("""type""" ) ) _lowerCAmelCase = do_lower_case _lowerCAmelCase = strip_accents _lowerCAmelCase = tokenize_chinese_chars _lowerCAmelCase = normalizer_class(**lowerCamelCase ) _lowerCAmelCase = do_lower_case def A__ (self , lowerCamelCase , lowerCamelCase=None ): '''simple docstring''' _lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def A__ (self , lowerCamelCase , lowerCamelCase = None ): '''simple docstring''' _lowerCAmelCase = [self.sep_token_id] _lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def A__ (self , lowerCamelCase , lowerCamelCase = None ): '''simple docstring''' _lowerCAmelCase = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase ) return tuple(lowerCamelCase )
317
"""simple docstring""" import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters SCREAMING_SNAKE_CASE : Any = (7_2_0, 1_2_8_0) # Height, Width SCREAMING_SNAKE_CASE : List[str] = (0.4, 0.6) # if height or width lower than this scale, drop it. SCREAMING_SNAKE_CASE : List[Any] = 1 / 1_0_0 SCREAMING_SNAKE_CASE : Optional[Any] = '''''' SCREAMING_SNAKE_CASE : Dict = '''''' SCREAMING_SNAKE_CASE : List[Any] = '''''' SCREAMING_SNAKE_CASE : Dict = 2_5_0 def __UpperCAmelCase ( ) -> None: """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = get_dataset(snake_case_ , snake_case_ ) for index in range(snake_case_ ): _lowerCAmelCase = random.sample(range(len(snake_case_ ) ) , 4 ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = update_image_and_anno( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , filter_scale=snake_case_ , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' _lowerCAmelCase = random_chars(32 ) _lowerCAmelCase = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0] _lowerCAmelCase = F"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}""" cva.imwrite(F"""{file_root}.jpg""" , snake_case_ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(F"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" ) _lowerCAmelCase = [] for anno in new_annos: _lowerCAmelCase = anno[3] - anno[1] _lowerCAmelCase = anno[4] - anno[2] _lowerCAmelCase = anno[1] + width / 2 _lowerCAmelCase = anno[2] + height / 2 _lowerCAmelCase = F"""{anno[0]} {x_center} {y_center} {width} {height}""" annos_list.append(snake_case_ ) with open(F"""{file_root}.txt""" , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def __UpperCAmelCase ( snake_case_ : str , snake_case_ : str ) -> tuple[list, list]: """simple docstring""" _lowerCAmelCase = [] _lowerCAmelCase = [] for label_file in glob.glob(os.path.join(snake_case_ , """*.txt""" ) ): _lowerCAmelCase = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(snake_case_ ) as in_file: _lowerCAmelCase = in_file.readlines() _lowerCAmelCase = os.path.join(snake_case_ , F"""{label_name}.jpg""" ) _lowerCAmelCase = [] for obj_list in obj_lists: _lowerCAmelCase = obj_list.rstrip("""\n""" ).split(""" """ ) _lowerCAmelCase = float(obj[1] ) - float(obj[3] ) / 2 _lowerCAmelCase = float(obj[2] ) - float(obj[4] ) / 2 _lowerCAmelCase = float(obj[1] ) + float(obj[3] ) / 2 _lowerCAmelCase = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(snake_case_ ) labels.append(snake_case_ ) return img_paths, labels def __UpperCAmelCase ( snake_case_ : list , snake_case_ : list , snake_case_ : list[int] , snake_case_ : tuple[int, int] , snake_case_ : tuple[float, float] , snake_case_ : float = 0.0 , ) -> tuple[list, list, str]: """simple docstring""" _lowerCAmelCase = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) _lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) _lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) _lowerCAmelCase = int(scale_x * output_size[1] ) _lowerCAmelCase = int(scale_y * output_size[0] ) _lowerCAmelCase = [] _lowerCAmelCase = [] for i, index in enumerate(snake_case_ ): _lowerCAmelCase = all_img_list[index] path_list.append(snake_case_ ) _lowerCAmelCase = all_annos[index] _lowerCAmelCase = cva.imread(snake_case_ ) if i == 0: # top-left _lowerCAmelCase = cva.resize(snake_case_ , (divid_point_x, 
divid_point_y) ) _lowerCAmelCase = img for bbox in img_annos: _lowerCAmelCase = bbox[1] * scale_x _lowerCAmelCase = bbox[2] * scale_y _lowerCAmelCase = bbox[3] * scale_x _lowerCAmelCase = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right _lowerCAmelCase = cva.resize(snake_case_ , (output_size[1] - divid_point_x, divid_point_y) ) _lowerCAmelCase = img for bbox in img_annos: _lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x) _lowerCAmelCase = bbox[2] * scale_y _lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x) _lowerCAmelCase = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left _lowerCAmelCase = cva.resize(snake_case_ , (divid_point_x, output_size[0] - divid_point_y) ) _lowerCAmelCase = img for bbox in img_annos: _lowerCAmelCase = bbox[1] * scale_x _lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y) _lowerCAmelCase = bbox[3] * scale_x _lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right _lowerCAmelCase = cva.resize( snake_case_ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) _lowerCAmelCase = img for bbox in img_annos: _lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x) _lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y) _lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x) _lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: _lowerCAmelCase = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def __UpperCAmelCase ( snake_case_ : int ) -> str: """simple docstring""" assert number_char > 1, "The number of character should greater than 1" _lowerCAmelCase = ascii_lowercase + digits return "".join(random.choice(snake_case_ ) for _ in range(snake_case_ ) ) if __name__ == "__main__": main() print('''DONE ✅''')
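# NOTE: a hedged numeric sanity check (illustrative, not part of the original script)
# of the quadrant mapping above for the top-right tile (i == 1): x coordinates are
# squeezed into [scale_x, 1] and y coordinates into [0, scale_y]. The values are
# chosen to be exactly representable in binary floating point.
_scale_x, _scale_y = 0.5, 0.5
_bbox = [0, 0.25, 0.5, 0.75, 1.0]  # class, xmin, ymin, xmax, ymax (normalized)
_xmin = _scale_x + _bbox[1] * (1 - _scale_x)
_ymin = _bbox[2] * _scale_y
_xmax = _scale_x + _bbox[3] * (1 - _scale_x)
_ymax = _bbox[4] * _scale_y
assert (_xmin, _ymin, _xmax, _ymax) == (0.625, 0.25, 0.875, 0.5)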
317
1
"""simple docstring""" from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) # pylint: disable=invalid-name SCREAMING_SNAKE_CASE : Any = ''' Examples: ```py >>> import torch >>> import numpy as np >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline >>> from transformers import pipeline >>> from diffusers.utils import load_image >>> def make_hint(image, depth_estimator): ... image = depth_estimator(image)["depth"] ... image = np.array(image) ... image = image[:, :, None] ... image = np.concatenate([image, image, image], axis=2) ... detected_map = torch.from_numpy(image).float() / 255.0 ... hint = detected_map.permute(2, 0, 1) ... return hint >>> depth_estimator = pipeline("depth-estimation") >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ... ) >>> pipe_prior = pipe_prior.to("cuda") >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> img = load_image( ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" ... "/kandinsky/cat.png" ... ).resize((768, 768)) >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda") >>> prompt = "A robot, 4k photo" >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature" >>> generator = torch.Generator(device="cuda").manual_seed(43) >>> image_emb, zero_image_emb = pipe_prior( ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator ... ).to_tuple() >>> images = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... hint=hint, ... num_inference_steps=50, ... generator=generator, ... height=768, ... width=768, ... 
).images >>> images[0].save("robot_cat.png") ``` ''' def __UpperCAmelCase ( snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Any=8 ) -> str: """simple docstring""" _lowerCAmelCase = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 _lowerCAmelCase = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class __lowerCamelCase ( __lowercase ): def __init__(self , lowerCamelCase , lowerCamelCase , lowerCamelCase , ): '''simple docstring''' super().__init__() self.register_modules( unet=lowerCamelCase , scheduler=lowerCamelCase , movq=lowerCamelCase , ) _lowerCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if latents is None: _lowerCAmelCase = randn_tensor(lowerCamelCase , generator=lowerCamelCase , device=lowerCamelCase , dtype=lowerCamelCase ) else: if latents.shape != shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) _lowerCAmelCase = latents.to(lowerCamelCase ) _lowerCAmelCase = latents * scheduler.init_noise_sigma return latents def A__ (self , lowerCamelCase=0 ): '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) _lowerCAmelCase = torch.device(f"""cuda:{gpu_id}""" ) _lowerCAmelCase = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowerCamelCase , lowerCamelCase ) def A__ (self , lowerCamelCase=0 ): '''simple docstring''' if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) _lowerCAmelCase = torch.device(f"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=lowerCamelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) _lowerCAmelCase = None for cpu_offloaded_model in [self.unet, self.movq]: _lowerCAmelCase , _lowerCAmelCase = cpu_offload_with_hook(lowerCamelCase , lowerCamelCase , prev_module_hook=lowerCamelCase ) # We'll offload the last model manually. 
_lowerCAmelCase = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def A__ (self ): '''simple docstring''' if not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(lowerCamelCase , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(lowerCamelCase ) def __call__(self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 512 , lowerCamelCase = 512 , lowerCamelCase = 100 , lowerCamelCase = 4.0 , lowerCamelCase = 1 , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = "pil" , lowerCamelCase = True , ): '''simple docstring''' _lowerCAmelCase = self._execution_device _lowerCAmelCase = guidance_scale > 1.0 if isinstance(lowerCamelCase , lowerCamelCase ): _lowerCAmelCase = torch.cat(lowerCamelCase , dim=0 ) if isinstance(lowerCamelCase , lowerCamelCase ): _lowerCAmelCase = torch.cat(lowerCamelCase , dim=0 ) if isinstance(lowerCamelCase , lowerCamelCase ): _lowerCAmelCase = torch.cat(lowerCamelCase , dim=0 ) _lowerCAmelCase = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: _lowerCAmelCase = image_embeds.repeat_interleave(lowerCamelCase , dim=0 ) _lowerCAmelCase = negative_image_embeds.repeat_interleave(lowerCamelCase , dim=0 ) _lowerCAmelCase = hint.repeat_interleave(lowerCamelCase , dim=0 ) _lowerCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase ) _lowerCAmelCase = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase ) self.scheduler.set_timesteps(lowerCamelCase , device=lowerCamelCase ) _lowerCAmelCase = self.scheduler.timesteps _lowerCAmelCase = self.movq.config.latent_channels _lowerCAmelCase , _lowerCAmelCase = downscale_height_and_width(lowerCamelCase , lowerCamelCase , self.movq_scale_factor ) # create initial latent _lowerCAmelCase = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowerCamelCase , lowerCamelCase , lowerCamelCase , self.scheduler , ) for i, t in enumerate(self.progress_bar(lowerCamelCase ) ): # expand the latents if we are doing classifier free guidance _lowerCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _lowerCAmelCase = {"""image_embeds""": image_embeds, """hint""": hint} _lowerCAmelCase = self.unet( sample=lowerCamelCase , timestep=lowerCamelCase , encoder_hidden_states=lowerCamelCase , added_cond_kwargs=lowerCamelCase , return_dict=lowerCamelCase , )[0] if do_classifier_free_guidance: _lowerCAmelCase , _lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 ) _lowerCAmelCase , _lowerCAmelCase = noise_pred.chunk(2 ) _lowerCAmelCase , _lowerCAmelCase = variance_pred.chunk(2 ) _lowerCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) _lowerCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , """variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): _lowerCAmelCase , _lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 _lowerCAmelCase = self.scheduler.step( lowerCamelCase , lowerCamelCase , lowerCamelCase , 
generator=lowerCamelCase , )[0] # post-processing _lowerCAmelCase = self.movq.decode(lowerCamelCase , force_not_quantize=lowerCamelCase )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: _lowerCAmelCase = image * 0.5 + 0.5 _lowerCAmelCase = image.clamp(0 , 1 ) _lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": _lowerCAmelCase = self.numpy_to_pil(lowerCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCamelCase )
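# NOTE: hedged sanity check of the `downscale_height_and_width` helper above (it keeps
# the latent grid aligned to the movq scale factor, default 8): dimensions are divided
# by scale_factor**2, rounded up, then re-multiplied by scale_factor.
assert downscale_height_and_width(768 , 768 ) == (96, 96)
assert downscale_height_and_width(512 , 512 ) == (64, 64)
assert downscale_height_and_width(600 , 600 ) == (80, 80)  # non-multiples round up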
317
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. SCREAMING_SNAKE_CASE : Dict = abspath(join(dirname(dirname(__file__)), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def __UpperCAmelCase ( snake_case_ : Optional[int] ) -> List[str]: """simple docstring""" from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(snake_case_ ) def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> int: """simple docstring""" from diffusers.utils.testing_utils import pytest_terminal_summary_main _lowerCAmelCase = terminalreporter.config.getoption("""--make-reports""" ) if make_reports: pytest_terminal_summary_main(snake_case_ , id=snake_case_ )
317
1
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , ) @pytest.mark.usefixtures('sm_env' ) @parameterized_class( [ { 'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 650, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'pytorch', 'script': 'run_ddp.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 600, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'tensorflow', 'script': 'run_tf_dist.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 600, 'eval_accuracy': 0.6, 'eval_loss': 0.7}, }, ] ) class __lowerCamelCase ( unittest.TestCase ): def A__ (self ): '''simple docstring''' if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=lowerCamelCase , ) assert hasattr(self , """env""" ) def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}""" # distributed data settings _lowerCAmelCase = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=lowerCamelCase , instance_count=lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase , hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=lowerCamelCase , py_version="""py36""" , ) def A__ (self , lowerCamelCase ): '''simple docstring''' TrainingJobAnalytics(lowerCamelCase ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(2,)] ) def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.create_estimator(lowerCamelCase ) # run training estimator.fit() # result dataframe _lowerCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis _lowerCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) _lowerCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping _lowerCAmelCase = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999_999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with 
open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , lowerCamelCase )
317
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool SCREAMING_SNAKE_CASE : Optional[Any] = { '''Acehnese Arabic''': '''ace_Arab''', '''Acehnese Latin''': '''ace_Latn''', '''Mesopotamian Arabic''': '''acm_Arab''', '''Ta\'izzi-Adeni Arabic''': '''acq_Arab''', '''Tunisian Arabic''': '''aeb_Arab''', '''Afrikaans''': '''afr_Latn''', '''South Levantine Arabic''': '''ajp_Arab''', '''Akan''': '''aka_Latn''', '''Amharic''': '''amh_Ethi''', '''North Levantine Arabic''': '''apc_Arab''', '''Modern Standard Arabic''': '''arb_Arab''', '''Modern Standard Arabic Romanized''': '''arb_Latn''', '''Najdi Arabic''': '''ars_Arab''', '''Moroccan Arabic''': '''ary_Arab''', '''Egyptian Arabic''': '''arz_Arab''', '''Assamese''': '''asm_Beng''', '''Asturian''': '''ast_Latn''', '''Awadhi''': '''awa_Deva''', '''Central Aymara''': '''ayr_Latn''', '''South Azerbaijani''': '''azb_Arab''', '''North Azerbaijani''': '''azj_Latn''', '''Bashkir''': '''bak_Cyrl''', '''Bambara''': '''bam_Latn''', '''Balinese''': '''ban_Latn''', '''Belarusian''': '''bel_Cyrl''', '''Bemba''': '''bem_Latn''', '''Bengali''': '''ben_Beng''', '''Bhojpuri''': '''bho_Deva''', '''Banjar Arabic''': '''bjn_Arab''', '''Banjar Latin''': '''bjn_Latn''', '''Standard Tibetan''': '''bod_Tibt''', '''Bosnian''': '''bos_Latn''', '''Buginese''': '''bug_Latn''', '''Bulgarian''': '''bul_Cyrl''', '''Catalan''': '''cat_Latn''', '''Cebuano''': '''ceb_Latn''', '''Czech''': '''ces_Latn''', '''Chokwe''': '''cjk_Latn''', '''Central Kurdish''': '''ckb_Arab''', '''Crimean Tatar''': '''crh_Latn''', '''Welsh''': '''cym_Latn''', '''Danish''': '''dan_Latn''', '''German''': '''deu_Latn''', '''Southwestern Dinka''': '''dik_Latn''', '''Dyula''': '''dyu_Latn''', '''Dzongkha''': '''dzo_Tibt''', '''Greek''': '''ell_Grek''', '''English''': '''eng_Latn''', '''Esperanto''': '''epo_Latn''', '''Estonian''': '''est_Latn''', '''Basque''': '''eus_Latn''', '''Ewe''': '''ewe_Latn''', '''Faroese''': '''fao_Latn''', '''Fijian''': '''fij_Latn''', '''Finnish''': '''fin_Latn''', '''Fon''': '''fon_Latn''', '''French''': '''fra_Latn''', '''Friulian''': '''fur_Latn''', '''Nigerian Fulfulde''': '''fuv_Latn''', '''Scottish Gaelic''': '''gla_Latn''', '''Irish''': '''gle_Latn''', '''Galician''': '''glg_Latn''', '''Guarani''': '''grn_Latn''', '''Gujarati''': '''guj_Gujr''', '''Haitian Creole''': '''hat_Latn''', '''Hausa''': '''hau_Latn''', '''Hebrew''': '''heb_Hebr''', '''Hindi''': '''hin_Deva''', '''Chhattisgarhi''': '''hne_Deva''', '''Croatian''': '''hrv_Latn''', '''Hungarian''': '''hun_Latn''', '''Armenian''': '''hye_Armn''', '''Igbo''': '''ibo_Latn''', '''Ilocano''': '''ilo_Latn''', '''Indonesian''': '''ind_Latn''', '''Icelandic''': '''isl_Latn''', '''Italian''': '''ita_Latn''', '''Javanese''': '''jav_Latn''', '''Japanese''': '''jpn_Jpan''', '''Kabyle''': '''kab_Latn''', '''Jingpho''': '''kac_Latn''', '''Kamba''': '''kam_Latn''', '''Kannada''': 
'''kan_Knda''', '''Kashmiri Arabic''': '''kas_Arab''', '''Kashmiri Devanagari''': '''kas_Deva''', '''Georgian''': '''kat_Geor''', '''Central Kanuri Arabic''': '''knc_Arab''', '''Central Kanuri Latin''': '''knc_Latn''', '''Kazakh''': '''kaz_Cyrl''', '''Kabiyè''': '''kbp_Latn''', '''Kabuverdianu''': '''kea_Latn''', '''Khmer''': '''khm_Khmr''', '''Kikuyu''': '''kik_Latn''', '''Kinyarwanda''': '''kin_Latn''', '''Kyrgyz''': '''kir_Cyrl''', '''Kimbundu''': '''kmb_Latn''', '''Northern Kurdish''': '''kmr_Latn''', '''Kikongo''': '''kon_Latn''', '''Korean''': '''kor_Hang''', '''Lao''': '''lao_Laoo''', '''Ligurian''': '''lij_Latn''', '''Limburgish''': '''lim_Latn''', '''Lingala''': '''lin_Latn''', '''Lithuanian''': '''lit_Latn''', '''Lombard''': '''lmo_Latn''', '''Latgalian''': '''ltg_Latn''', '''Luxembourgish''': '''ltz_Latn''', '''Luba-Kasai''': '''lua_Latn''', '''Ganda''': '''lug_Latn''', '''Luo''': '''luo_Latn''', '''Mizo''': '''lus_Latn''', '''Standard Latvian''': '''lvs_Latn''', '''Magahi''': '''mag_Deva''', '''Maithili''': '''mai_Deva''', '''Malayalam''': '''mal_Mlym''', '''Marathi''': '''mar_Deva''', '''Minangkabau Arabic ''': '''min_Arab''', '''Minangkabau Latin''': '''min_Latn''', '''Macedonian''': '''mkd_Cyrl''', '''Plateau Malagasy''': '''plt_Latn''', '''Maltese''': '''mlt_Latn''', '''Meitei Bengali''': '''mni_Beng''', '''Halh Mongolian''': '''khk_Cyrl''', '''Mossi''': '''mos_Latn''', '''Maori''': '''mri_Latn''', '''Burmese''': '''mya_Mymr''', '''Dutch''': '''nld_Latn''', '''Norwegian Nynorsk''': '''nno_Latn''', '''Norwegian Bokmål''': '''nob_Latn''', '''Nepali''': '''npi_Deva''', '''Northern Sotho''': '''nso_Latn''', '''Nuer''': '''nus_Latn''', '''Nyanja''': '''nya_Latn''', '''Occitan''': '''oci_Latn''', '''West Central Oromo''': '''gaz_Latn''', '''Odia''': '''ory_Orya''', '''Pangasinan''': '''pag_Latn''', '''Eastern Panjabi''': '''pan_Guru''', '''Papiamento''': '''pap_Latn''', '''Western Persian''': '''pes_Arab''', '''Polish''': '''pol_Latn''', '''Portuguese''': '''por_Latn''', '''Dari''': '''prs_Arab''', '''Southern Pashto''': '''pbt_Arab''', '''Ayacucho Quechua''': '''quy_Latn''', '''Romanian''': '''ron_Latn''', '''Rundi''': '''run_Latn''', '''Russian''': '''rus_Cyrl''', '''Sango''': '''sag_Latn''', '''Sanskrit''': '''san_Deva''', '''Santali''': '''sat_Olck''', '''Sicilian''': '''scn_Latn''', '''Shan''': '''shn_Mymr''', '''Sinhala''': '''sin_Sinh''', '''Slovak''': '''slk_Latn''', '''Slovenian''': '''slv_Latn''', '''Samoan''': '''smo_Latn''', '''Shona''': '''sna_Latn''', '''Sindhi''': '''snd_Arab''', '''Somali''': '''som_Latn''', '''Southern Sotho''': '''sot_Latn''', '''Spanish''': '''spa_Latn''', '''Tosk Albanian''': '''als_Latn''', '''Sardinian''': '''srd_Latn''', '''Serbian''': '''srp_Cyrl''', '''Swati''': '''ssw_Latn''', '''Sundanese''': '''sun_Latn''', '''Swedish''': '''swe_Latn''', '''Swahili''': '''swh_Latn''', '''Silesian''': '''szl_Latn''', '''Tamil''': '''tam_Taml''', '''Tatar''': '''tat_Cyrl''', '''Telugu''': '''tel_Telu''', '''Tajik''': '''tgk_Cyrl''', '''Tagalog''': '''tgl_Latn''', '''Thai''': '''tha_Thai''', '''Tigrinya''': '''tir_Ethi''', '''Tamasheq Latin''': '''taq_Latn''', '''Tamasheq Tifinagh''': '''taq_Tfng''', '''Tok Pisin''': '''tpi_Latn''', '''Tswana''': '''tsn_Latn''', '''Tsonga''': '''tso_Latn''', '''Turkmen''': '''tuk_Latn''', '''Tumbuka''': '''tum_Latn''', '''Turkish''': '''tur_Latn''', '''Twi''': '''twi_Latn''', '''Central Atlas Tamazight''': '''tzm_Tfng''', '''Uyghur''': '''uig_Arab''', '''Ukrainian''': '''ukr_Cyrl''', '''Umbundu''': '''umb_Latn''', 
'''Urdu''': '''urd_Arab''', '''Northern Uzbek''': '''uzn_Latn''', '''Venetian''': '''vec_Latn''', '''Vietnamese''': '''vie_Latn''', '''Waray''': '''war_Latn''', '''Wolof''': '''wol_Latn''', '''Xhosa''': '''xho_Latn''', '''Eastern Yiddish''': '''ydd_Hebr''', '''Yoruba''': '''yor_Latn''', '''Yue Chinese''': '''yue_Hant''', '''Chinese Simplified''': '''zho_Hans''', '''Chinese Traditional''': '''zho_Hant''', '''Standard Malay''': '''zsm_Latn''', '''Zulu''': '''zul_Latn''', } class __lowerCamelCase ( __lowercase ): __UpperCamelCase = 'facebook/nllb-200-distilled-600M' __UpperCamelCase = ( 'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should ' 'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ' 'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in ' 'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.' ) __UpperCamelCase = 'translator' __UpperCamelCase = AutoTokenizer __UpperCamelCase = AutoModelForSeqaSeqLM __UpperCamelCase = LANGUAGE_CODES __UpperCamelCase = ['text', 'text', 'text'] __UpperCamelCase = ['text'] def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if src_lang not in self.lang_to_code: raise ValueError(f"""{src_lang} is not a supported language.""" ) if tgt_lang not in self.lang_to_code: raise ValueError(f"""{tgt_lang} is not a supported language.""" ) _lowerCAmelCase = self.lang_to_code[src_lang] _lowerCAmelCase = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( lowerCamelCase , return_tensors="""pt""" , src_lang=lowerCamelCase , tgt_lang=lowerCamelCase ) def A__ (self , lowerCamelCase ): '''simple docstring''' return self.model.generate(**lowerCamelCase ) def A__ (self , lowerCamelCase ): '''simple docstring''' return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowerCamelCase )
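# NOTE: hedged usage sketch, assuming the class above mirrors the `transformers`
# agents translation tool; `load_tool("translation")` follows the library's
# documented pattern, but is commented out since it downloads the NLLB checkpoint.
# from transformers import load_tool
#
# translator = load_tool("translation" )
# print(translator("Bonjour, comment allez-vous ?" , src_lang="French" , tgt_lang="English" ) )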
317
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_beit import BeitImageProcessor SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) class __lowerCamelCase ( __lowercase ): def __init__(self , *lowerCamelCase , **lowerCamelCase ): '''simple docstring''' warnings.warn( """The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use BeitImageProcessor instead.""" , lowerCamelCase , ) super().__init__(*lowerCamelCase , **lowerCamelCase )
317
"""simple docstring""" from math import isqrt def __UpperCAmelCase ( snake_case_ : int ) -> list[int]: """simple docstring""" _lowerCAmelCase = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , snake_case_ , snake_case_ ): _lowerCAmelCase = False return [i for i in range(2 , snake_case_ ) if is_prime[i]] def __UpperCAmelCase ( snake_case_ : int = 10**8 ) -> int: """simple docstring""" _lowerCAmelCase = calculate_prime_numbers(max_number // 2 ) _lowerCAmelCase = 0 _lowerCAmelCase = 0 _lowerCAmelCase = len(snake_case_ ) - 1 while left <= right: while prime_numbers[left] * prime_numbers[right] >= max_number: right -= 1 semiprimes_count += right - left + 1 left += 1 return semiprimes_count if __name__ == "__main__": print(F'{solution() = }')
317
1
"""simple docstring""" import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class __lowerCamelCase : def __init__(self , lowerCamelCase , lowerCamelCase = 13 , lowerCamelCase = 64 , lowerCamelCase = 2 , lowerCamelCase = 3 , lowerCamelCase = 3 , lowerCamelCase = True , lowerCamelCase = True , lowerCamelCase = 128 , lowerCamelCase=[16, 32, 64, 128] , lowerCamelCase = 7 , lowerCamelCase = 4 , lowerCamelCase = 37 , lowerCamelCase = "gelu" , lowerCamelCase = 0.1 , lowerCamelCase = 0.1 , lowerCamelCase = 10 , lowerCamelCase = 0.02 , lowerCamelCase = 2 , lowerCamelCase = 1 , lowerCamelCase = 128 , lowerCamelCase = [2, 2, 2, 2] , lowerCamelCase = 2 , lowerCamelCase = 2 , ): '''simple docstring''' _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = image_size _lowerCAmelCase = patch_size _lowerCAmelCase = num_channels _lowerCAmelCase = is_training _lowerCAmelCase = use_labels _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = type_sequence_label_size _lowerCAmelCase = initializer_range _lowerCAmelCase = encoder_stride _lowerCAmelCase = num_attention_outputs _lowerCAmelCase = embed_dim _lowerCAmelCase = embed_dim + 1 _lowerCAmelCase = resolution _lowerCAmelCase = depths _lowerCAmelCase = hidden_sizes _lowerCAmelCase = dim _lowerCAmelCase = mlp_expansion_ratio def A__ (self ): '''simple docstring''' _lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase = None if self.use_labels: _lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase = self.get_config() return config, pixel_values, labels def A__ (self ): '''simple docstring''' return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = 
TFEfficientFormerModel(config=lowerCamelCase ) _lowerCAmelCase = model(lowerCamelCase , training=lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.type_sequence_label_size _lowerCAmelCase = TFEfficientFormerForImageClassification(lowerCamelCase ) _lowerCAmelCase = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _lowerCAmelCase = 1 _lowerCAmelCase = TFEfficientFormerForImageClassification(lowerCamelCase ) _lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCAmelCase = model(lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs _lowerCAmelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class __lowerCamelCase ( __lowercase , __lowercase , unittest.TestCase ): __UpperCamelCase = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) __UpperCamelCase = ( { 'feature-extraction': TFEfficientFormerModel, 'image-classification': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def A__ (self ): '''simple docstring''' _lowerCAmelCase = TFEfficientFormerModelTester(self ) _lowerCAmelCase = ConfigTester( self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 ) def A__ (self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" ) def A__ (self ): '''simple docstring''' pass @unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" ) def A__ (self ): '''simple docstring''' pass def A__ (self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = model_class(lowerCamelCase ) _lowerCAmelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase = [*signature.parameters.keys()] _lowerCAmelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase ) def A__ (self ): '''simple docstring''' def check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ): _lowerCAmelCase = model_class(lowerCamelCase ) _lowerCAmelCase = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) , training=lowerCamelCase ) _lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _lowerCAmelCase = getattr( self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(lowerCamelCase ) , lowerCamelCase ) if hasattr(self.model_tester , """encoder_seq_length""" ): _lowerCAmelCase = 
self.model_tester.encoder_seq_length if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1: _lowerCAmelCase = seq_length * self.model_tester.chunk_length else: _lowerCAmelCase = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: _lowerCAmelCase = outputs.decoder_hidden_states self.assertIsInstance(lowerCamelCase , (list, tuple) ) self.assertEqual(len(lowerCamelCase ) , lowerCamelCase ) _lowerCAmelCase = getattr(self.model_tester , """seq_length""" , lowerCamelCase ) _lowerCAmelCase = getattr(self.model_tester , """decoder_seq_length""" , lowerCamelCase ) self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , ) _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = True check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase = True check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase=False ): '''simple docstring''' _lowerCAmelCase = super()._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase ) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) @unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase ) @slow def A__ (self ): '''simple docstring''' for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase = TFEfficientFormerModel.from_pretrained(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase = True _lowerCAmelCase = getattr(self.model_tester , """seq_length""" , lowerCamelCase ) _lowerCAmelCase = getattr(self.model_tester , """encoder_seq_length""" , lowerCamelCase ) _lowerCAmelCase = getattr(self.model_tester , """key_length""" , lowerCamelCase ) _lowerCAmelCase = getattr(self.model_tester , """chunk_length""" , lowerCamelCase ) if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ): _lowerCAmelCase = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: _lowerCAmelCase = True _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = model_class(lowerCamelCase ) _lowerCAmelCase = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) , training=lowerCamelCase ) _lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCamelCase ) ,
self.model_tester.num_attention_outputs ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _lowerCAmelCase = True _lowerCAmelCase = model_class(lowerCamelCase ) _lowerCAmelCase = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) , training=lowerCamelCase ) _lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_attention_outputs ) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def A__ (self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model _lowerCAmelCase = model_class(lowerCamelCase ) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes _lowerCAmelCase = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCamelCase ) for key, val in model.input_signature.items() if key in model.dummy_inputs } _lowerCAmelCase = model(lowerCamelCase ) self.assertTrue(outputs_dict is not None ) def __UpperCAmelCase ( ) -> Dict: """simple docstring""" _lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class __lowerCamelCase ( unittest.TestCase ): @cached_property def A__ (self ): '''simple docstring''' return ( EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" ) if is_vision_available() else None ) @slow def A__ (self ): '''simple docstring''' _lowerCAmelCase = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" ) _lowerCAmelCase = self.default_image_processor _lowerCAmelCase = prepare_img() _lowerCAmelCase = image_processor(images=lowerCamelCase , return_tensors="""tf""" ) # forward pass _lowerCAmelCase = model(**lowerCamelCase , training=lowerCamelCase ) # verify the logits _lowerCAmelCase = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , lowerCamelCase ) _lowerCAmelCase = tf.constant([-0.0555, 0.4825, -0.0852] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 ) ) @slow def A__ (self ): '''simple docstring''' _lowerCAmelCase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( """snap-research/efficientformer-l1-300""" ) _lowerCAmelCase = self.default_image_processor _lowerCAmelCase = prepare_img() _lowerCAmelCase = image_processor(images=lowerCamelCase , return_tensors="""tf""" ) # forward pass _lowerCAmelCase = model(**lowerCamelCase , training=lowerCamelCase ) # verify the logits _lowerCAmelCase = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , lowerCamelCase ) _lowerCAmelCase = tf.constant([-0.1312, 0.4353, -1.0499] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 ) )
317
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class __lowerCamelCase ( __lowercase ): __UpperCamelCase = ( 'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.' 'It takes two arguments named `image` which should be the original image, and `label` which should be a text ' 'describing the elements what should be identified in the segmentation mask. The tool returns the mask.' ) __UpperCamelCase = 'CIDAS/clipseg-rd64-refined' __UpperCamelCase = 'image_segmenter' __UpperCamelCase = CLIPSegForImageSegmentation __UpperCamelCase = ['image', 'text'] __UpperCamelCase = ['image'] def __init__(self , *lowerCamelCase , **lowerCamelCase ): '''simple docstring''' requires_backends(self , ["""vision"""] ) super().__init__(*lowerCamelCase , **lowerCamelCase ) def A__ (self , lowerCamelCase , lowerCamelCase ): '''simple docstring''' return self.pre_processor(text=[label] , images=[image] , padding=lowerCamelCase , return_tensors="""pt""" ) def A__ (self , lowerCamelCase ): '''simple docstring''' with torch.no_grad(): _lowerCAmelCase = self.model(**lowerCamelCase ).logits return logits def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = outputs.cpu().detach().numpy() _lowerCAmelCase = 0 _lowerCAmelCase = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
317
1
"""simple docstring""" import string import numpy def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int ) -> int: """simple docstring""" return b if a == 0 else greatest_common_divisor(b % a , snake_case_ ) class __lowerCamelCase : __UpperCamelCase = string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) __UpperCamelCase = numpy.vectorize(lambda __lowercase : x % 36 ) __UpperCamelCase = numpy.vectorize(__lowercase ) def __init__(self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.modulus(lowerCamelCase ) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key _lowerCAmelCase = encrypt_key.shape[0] def A__ (self , lowerCamelCase ): '''simple docstring''' return self.key_string.index(lowerCamelCase ) def A__ (self , lowerCamelCase ): '''simple docstring''' return self.key_string[round(lowerCamelCase )] def A__ (self ): '''simple docstring''' _lowerCAmelCase = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: _lowerCAmelCase = det % len(self.key_string ) _lowerCAmelCase = len(self.key_string ) if greatest_common_divisor(lowerCamelCase , len(self.key_string ) ) != 1: _lowerCAmelCase = ( f"""determinant modular {req_l} of encryption key({det}) """ f"""is not co prime w.r.t {req_l}.\nTry another key.""" ) raise ValueError(lowerCamelCase ) def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = [char for char in text.upper() if char in self.key_string] _lowerCAmelCase = chars[-1] while len(lowerCamelCase ) % self.break_key != 0: chars.append(lowerCamelCase ) return "".join(lowerCamelCase ) def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.process_text(text.upper() ) _lowerCAmelCase = """""" for i in range(0 , len(lowerCamelCase ) - self.break_key + 1 , self.break_key ): _lowerCAmelCase = text[i : i + self.break_key] _lowerCAmelCase = [self.replace_letters(lowerCamelCase ) for char in batch] _lowerCAmelCase = numpy.array([vec] ).T _lowerCAmelCase = self.modulus(self.encrypt_key.dot(lowerCamelCase ) ).T.tolist()[ 0 ] _lowerCAmelCase = """""".join( self.replace_digits(lowerCamelCase ) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def A__ (self ): '''simple docstring''' _lowerCAmelCase = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: _lowerCAmelCase = det % len(self.key_string ) _lowerCAmelCase = None for i in range(len(self.key_string ) ): if (det * i) % len(self.key_string ) == 1: _lowerCAmelCase = i break _lowerCAmelCase = ( det_inv * numpy.linalg.det(self.encrypt_key ) * numpy.linalg.inv(self.encrypt_key ) ) return self.to_int(self.modulus(lowerCamelCase ) ) def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.make_decrypt_key() _lowerCAmelCase = self.process_text(text.upper() ) _lowerCAmelCase = """""" for i in range(0 , len(lowerCamelCase ) - self.break_key + 1 , self.break_key ): _lowerCAmelCase = text[i : i + self.break_key] _lowerCAmelCase = [self.replace_letters(lowerCamelCase ) for char in batch] _lowerCAmelCase = numpy.array([vec] ).T _lowerCAmelCase = self.modulus(decrypt_key.dot(lowerCamelCase ) ).T.tolist()[0] _lowerCAmelCase = """""".join( self.replace_digits(lowerCamelCase ) for num in batch_decrypted ) decrypted += decrypted_batch return decrypted def __UpperCAmelCase ( ) -> None: """simple docstring""" _lowerCAmelCase = int(input("""Enter the order of the encryption key: """ ) ) 
_lowerCAmelCase = [] print("""Enter each row of the encryption key with space separated integers""" ) for _ in range(snake_case_ ): _lowerCAmelCase = [int(snake_case_ ) for x in input().split()] hill_matrix.append(snake_case_ ) _lowerCAmelCase = HillCipher(numpy.array(snake_case_ ) ) print("""Would you like to encrypt or decrypt some text? (1 or 2)""" ) _lowerCAmelCase = input("""\n1. Encrypt\n2. Decrypt\n""" ) if option == "1": _lowerCAmelCase = input("""What text would you like to encrypt?: """ ) print("""Your encrypted text is:""" ) print(hc.encrypt(snake_case_ ) ) elif option == "2": _lowerCAmelCase = input("""What text would you like to decrypt?: """ ) print("""Your decrypted text is:""" ) print(hc.decrypt(snake_case_ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
317
"""simple docstring""" from __future__ import annotations import queue class __lowerCamelCase : def __init__(self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = data _lowerCAmelCase = None _lowerCAmelCase = None def __UpperCAmelCase ( ) -> TreeNode: """simple docstring""" print("""\n********Press N to stop entering at any point of time********\n""" ) _lowerCAmelCase = input("""Enter the value of the root node: """ ).strip().lower() _lowerCAmelCase = queue.Queue() _lowerCAmelCase = TreeNode(int(snake_case_ ) ) q.put(snake_case_ ) while not q.empty(): _lowerCAmelCase = q.get() _lowerCAmelCase = F"""Enter the left node of {node_found.data}: """ _lowerCAmelCase = input(snake_case_ ).strip().lower() or """n""" if check == "n": return tree_node _lowerCAmelCase = TreeNode(int(snake_case_ ) ) _lowerCAmelCase = left_node q.put(snake_case_ ) _lowerCAmelCase = F"""Enter the right node of {node_found.data}: """ _lowerCAmelCase = input(snake_case_ ).strip().lower() or """n""" if check == "n": return tree_node _lowerCAmelCase = TreeNode(int(snake_case_ ) ) _lowerCAmelCase = right_node q.put(snake_case_ ) raise def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return print(node.data , end=""",""" ) pre_order(node.left ) pre_order(node.right ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return in_order(node.left ) print(node.data , end=""",""" ) in_order(node.right ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=""",""" ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase = queue.Queue() q.put(snake_case_ ) while not q.empty(): _lowerCAmelCase = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase = queue.Queue() q.put(snake_case_ ) while not q.empty(): _lowerCAmelCase = [] while not q.empty(): _lowerCAmelCase = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(snake_case_ ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase = [] _lowerCAmelCase = node while n or stack: while n: # start from root node, find its left child print(n.data , end=""",""" ) stack.append(snake_case_ ) _lowerCAmelCase = n.left # end of while means current node doesn't have left child _lowerCAmelCase = stack.pop() # start to traverse its right child _lowerCAmelCase = n.right def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase = [] _lowerCAmelCase = node while n or stack: while n: stack.append(snake_case_ ) _lowerCAmelCase = n.left _lowerCAmelCase = stack.pop() print(n.data , end=""",""" ) _lowerCAmelCase = n.right def __UpperCAmelCase ( 
snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase , _lowerCAmelCase = [], [] _lowerCAmelCase = node stacka.append(snake_case_ ) while stacka: # to find the reversed order of post order, store it in stack2 _lowerCAmelCase = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(snake_case_ ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=""",""" ) def __UpperCAmelCase ( snake_case_ : str = "" , snake_case_ : int=50 , snake_case_ : Dict="*" ) -> str: """simple docstring""" if not s: return "\n" + width * char _lowerCAmelCase , _lowerCAmelCase = divmod(width - len(snake_case_ ) - 2 , 2 ) return F"""{left * char} {s} {(left + extra) * char}""" if __name__ == "__main__": import doctest doctest.testmod() print(prompt('''Binary Tree Traversals''')) SCREAMING_SNAKE_CASE : TreeNode = build_tree() print(prompt('''Pre Order Traversal''')) pre_order(node) print(prompt() + '''\n''') print(prompt('''In Order Traversal''')) in_order(node) print(prompt() + '''\n''') print(prompt('''Post Order Traversal''')) post_order(node) print(prompt() + '''\n''') print(prompt('''Level Order Traversal''')) level_order(node) print(prompt() + '''\n''') print(prompt('''Actual Level Order Traversal''')) level_order_actual(node) print('''*''' * 5_0 + '''\n''') print(prompt('''Pre Order Traversal - Iteration Version''')) pre_order_iter(node) print(prompt() + '''\n''') print(prompt('''In Order Traversal - Iteration Version''')) in_order_iter(node) print(prompt() + '''\n''') print(prompt('''Post Order Traversal - Iteration Version''')) post_order_iter(node) print(prompt())
317
1
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool SCREAMING_SNAKE_CASE : Optional[Any] = { '''Acehnese Arabic''': '''ace_Arab''', '''Acehnese Latin''': '''ace_Latn''', '''Mesopotamian Arabic''': '''acm_Arab''', '''Ta\'izzi-Adeni Arabic''': '''acq_Arab''', '''Tunisian Arabic''': '''aeb_Arab''', '''Afrikaans''': '''afr_Latn''', '''South Levantine Arabic''': '''ajp_Arab''', '''Akan''': '''aka_Latn''', '''Amharic''': '''amh_Ethi''', '''North Levantine Arabic''': '''apc_Arab''', '''Modern Standard Arabic''': '''arb_Arab''', '''Modern Standard Arabic Romanized''': '''arb_Latn''', '''Najdi Arabic''': '''ars_Arab''', '''Moroccan Arabic''': '''ary_Arab''', '''Egyptian Arabic''': '''arz_Arab''', '''Assamese''': '''asm_Beng''', '''Asturian''': '''ast_Latn''', '''Awadhi''': '''awa_Deva''', '''Central Aymara''': '''ayr_Latn''', '''South Azerbaijani''': '''azb_Arab''', '''North Azerbaijani''': '''azj_Latn''', '''Bashkir''': '''bak_Cyrl''', '''Bambara''': '''bam_Latn''', '''Balinese''': '''ban_Latn''', '''Belarusian''': '''bel_Cyrl''', '''Bemba''': '''bem_Latn''', '''Bengali''': '''ben_Beng''', '''Bhojpuri''': '''bho_Deva''', '''Banjar Arabic''': '''bjn_Arab''', '''Banjar Latin''': '''bjn_Latn''', '''Standard Tibetan''': '''bod_Tibt''', '''Bosnian''': '''bos_Latn''', '''Buginese''': '''bug_Latn''', '''Bulgarian''': '''bul_Cyrl''', '''Catalan''': '''cat_Latn''', '''Cebuano''': '''ceb_Latn''', '''Czech''': '''ces_Latn''', '''Chokwe''': '''cjk_Latn''', '''Central Kurdish''': '''ckb_Arab''', '''Crimean Tatar''': '''crh_Latn''', '''Welsh''': '''cym_Latn''', '''Danish''': '''dan_Latn''', '''German''': '''deu_Latn''', '''Southwestern Dinka''': '''dik_Latn''', '''Dyula''': '''dyu_Latn''', '''Dzongkha''': '''dzo_Tibt''', '''Greek''': '''ell_Grek''', '''English''': '''eng_Latn''', '''Esperanto''': '''epo_Latn''', '''Estonian''': '''est_Latn''', '''Basque''': '''eus_Latn''', '''Ewe''': '''ewe_Latn''', '''Faroese''': '''fao_Latn''', '''Fijian''': '''fij_Latn''', '''Finnish''': '''fin_Latn''', '''Fon''': '''fon_Latn''', '''French''': '''fra_Latn''', '''Friulian''': '''fur_Latn''', '''Nigerian Fulfulde''': '''fuv_Latn''', '''Scottish Gaelic''': '''gla_Latn''', '''Irish''': '''gle_Latn''', '''Galician''': '''glg_Latn''', '''Guarani''': '''grn_Latn''', '''Gujarati''': '''guj_Gujr''', '''Haitian Creole''': '''hat_Latn''', '''Hausa''': '''hau_Latn''', '''Hebrew''': '''heb_Hebr''', '''Hindi''': '''hin_Deva''', '''Chhattisgarhi''': '''hne_Deva''', '''Croatian''': '''hrv_Latn''', '''Hungarian''': '''hun_Latn''', '''Armenian''': '''hye_Armn''', '''Igbo''': '''ibo_Latn''', '''Ilocano''': '''ilo_Latn''', '''Indonesian''': '''ind_Latn''', '''Icelandic''': '''isl_Latn''', '''Italian''': '''ita_Latn''', '''Javanese''': '''jav_Latn''', '''Japanese''': '''jpn_Jpan''', '''Kabyle''': '''kab_Latn''', '''Jingpho''': '''kac_Latn''', '''Kamba''': '''kam_Latn''', '''Kannada''': 
'''kan_Knda''', '''Kashmiri Arabic''': '''kas_Arab''', '''Kashmiri Devanagari''': '''kas_Deva''', '''Georgian''': '''kat_Geor''', '''Central Kanuri Arabic''': '''knc_Arab''', '''Central Kanuri Latin''': '''knc_Latn''', '''Kazakh''': '''kaz_Cyrl''', '''Kabiyè''': '''kbp_Latn''', '''Kabuverdianu''': '''kea_Latn''', '''Khmer''': '''khm_Khmr''', '''Kikuyu''': '''kik_Latn''', '''Kinyarwanda''': '''kin_Latn''', '''Kyrgyz''': '''kir_Cyrl''', '''Kimbundu''': '''kmb_Latn''', '''Northern Kurdish''': '''kmr_Latn''', '''Kikongo''': '''kon_Latn''', '''Korean''': '''kor_Hang''', '''Lao''': '''lao_Laoo''', '''Ligurian''': '''lij_Latn''', '''Limburgish''': '''lim_Latn''', '''Lingala''': '''lin_Latn''', '''Lithuanian''': '''lit_Latn''', '''Lombard''': '''lmo_Latn''', '''Latgalian''': '''ltg_Latn''', '''Luxembourgish''': '''ltz_Latn''', '''Luba-Kasai''': '''lua_Latn''', '''Ganda''': '''lug_Latn''', '''Luo''': '''luo_Latn''', '''Mizo''': '''lus_Latn''', '''Standard Latvian''': '''lvs_Latn''', '''Magahi''': '''mag_Deva''', '''Maithili''': '''mai_Deva''', '''Malayalam''': '''mal_Mlym''', '''Marathi''': '''mar_Deva''', '''Minangkabau Arabic ''': '''min_Arab''', '''Minangkabau Latin''': '''min_Latn''', '''Macedonian''': '''mkd_Cyrl''', '''Plateau Malagasy''': '''plt_Latn''', '''Maltese''': '''mlt_Latn''', '''Meitei Bengali''': '''mni_Beng''', '''Halh Mongolian''': '''khk_Cyrl''', '''Mossi''': '''mos_Latn''', '''Maori''': '''mri_Latn''', '''Burmese''': '''mya_Mymr''', '''Dutch''': '''nld_Latn''', '''Norwegian Nynorsk''': '''nno_Latn''', '''Norwegian Bokmål''': '''nob_Latn''', '''Nepali''': '''npi_Deva''', '''Northern Sotho''': '''nso_Latn''', '''Nuer''': '''nus_Latn''', '''Nyanja''': '''nya_Latn''', '''Occitan''': '''oci_Latn''', '''West Central Oromo''': '''gaz_Latn''', '''Odia''': '''ory_Orya''', '''Pangasinan''': '''pag_Latn''', '''Eastern Panjabi''': '''pan_Guru''', '''Papiamento''': '''pap_Latn''', '''Western Persian''': '''pes_Arab''', '''Polish''': '''pol_Latn''', '''Portuguese''': '''por_Latn''', '''Dari''': '''prs_Arab''', '''Southern Pashto''': '''pbt_Arab''', '''Ayacucho Quechua''': '''quy_Latn''', '''Romanian''': '''ron_Latn''', '''Rundi''': '''run_Latn''', '''Russian''': '''rus_Cyrl''', '''Sango''': '''sag_Latn''', '''Sanskrit''': '''san_Deva''', '''Santali''': '''sat_Olck''', '''Sicilian''': '''scn_Latn''', '''Shan''': '''shn_Mymr''', '''Sinhala''': '''sin_Sinh''', '''Slovak''': '''slk_Latn''', '''Slovenian''': '''slv_Latn''', '''Samoan''': '''smo_Latn''', '''Shona''': '''sna_Latn''', '''Sindhi''': '''snd_Arab''', '''Somali''': '''som_Latn''', '''Southern Sotho''': '''sot_Latn''', '''Spanish''': '''spa_Latn''', '''Tosk Albanian''': '''als_Latn''', '''Sardinian''': '''srd_Latn''', '''Serbian''': '''srp_Cyrl''', '''Swati''': '''ssw_Latn''', '''Sundanese''': '''sun_Latn''', '''Swedish''': '''swe_Latn''', '''Swahili''': '''swh_Latn''', '''Silesian''': '''szl_Latn''', '''Tamil''': '''tam_Taml''', '''Tatar''': '''tat_Cyrl''', '''Telugu''': '''tel_Telu''', '''Tajik''': '''tgk_Cyrl''', '''Tagalog''': '''tgl_Latn''', '''Thai''': '''tha_Thai''', '''Tigrinya''': '''tir_Ethi''', '''Tamasheq Latin''': '''taq_Latn''', '''Tamasheq Tifinagh''': '''taq_Tfng''', '''Tok Pisin''': '''tpi_Latn''', '''Tswana''': '''tsn_Latn''', '''Tsonga''': '''tso_Latn''', '''Turkmen''': '''tuk_Latn''', '''Tumbuka''': '''tum_Latn''', '''Turkish''': '''tur_Latn''', '''Twi''': '''twi_Latn''', '''Central Atlas Tamazight''': '''tzm_Tfng''', '''Uyghur''': '''uig_Arab''', '''Ukrainian''': '''ukr_Cyrl''', '''Umbundu''': '''umb_Latn''', 
'''Urdu''': '''urd_Arab''', '''Northern Uzbek''': '''uzn_Latn''', '''Venetian''': '''vec_Latn''', '''Vietnamese''': '''vie_Latn''', '''Waray''': '''war_Latn''', '''Wolof''': '''wol_Latn''', '''Xhosa''': '''xho_Latn''', '''Eastern Yiddish''': '''ydd_Hebr''', '''Yoruba''': '''yor_Latn''', '''Yue Chinese''': '''yue_Hant''', '''Chinese Simplified''': '''zho_Hans''', '''Chinese Traditional''': '''zho_Hant''', '''Standard Malay''': '''zsm_Latn''', '''Zulu''': '''zul_Latn''', } class __lowerCamelCase ( __lowercase ): __UpperCamelCase = 'facebook/nllb-200-distilled-600M' __UpperCamelCase = ( 'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should ' 'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ' 'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in ' 'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.' ) __UpperCamelCase = 'translator' __UpperCamelCase = AutoTokenizer __UpperCamelCase = AutoModelForSeqaSeqLM __UpperCamelCase = LANGUAGE_CODES __UpperCamelCase = ['text', 'text', 'text'] __UpperCamelCase = ['text'] def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if src_lang not in self.lang_to_code: raise ValueError(f"""{src_lang} is not a supported language.""" ) if tgt_lang not in self.lang_to_code: raise ValueError(f"""{tgt_lang} is not a supported language.""" ) _lowerCAmelCase = self.lang_to_code[src_lang] _lowerCAmelCase = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( lowerCamelCase , return_tensors="""pt""" , src_lang=lowerCamelCase , tgt_lang=lowerCamelCase ) def A__ (self , lowerCamelCase ): '''simple docstring''' return self.model.generate(**lowerCamelCase ) def A__ (self , lowerCamelCase ): '''simple docstring''' return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowerCamelCase )
317
"""simple docstring""" from __future__ import annotations class __lowerCamelCase : def __init__(self , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase = text, pattern _lowerCAmelCase , _lowerCAmelCase = len(lowerCamelCase ), len(lowerCamelCase ) def A__ (self , lowerCamelCase ): '''simple docstring''' for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def A__ (self , lowerCamelCase ): '''simple docstring''' for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def A__ (self ): '''simple docstring''' _lowerCAmelCase = [] for i in range(self.textLen - self.patLen + 1 ): _lowerCAmelCase = self.mismatch_in_text(lowerCamelCase ) if mismatch_index == -1: positions.append(lowerCamelCase ) else: _lowerCAmelCase = self.match_in_pattern(self.text[mismatch_index] ) _lowerCAmelCase = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions SCREAMING_SNAKE_CASE : Any = '''ABAABA''' SCREAMING_SNAKE_CASE : Optional[int] = '''AB''' SCREAMING_SNAKE_CASE : str = BoyerMooreSearch(text, pattern) SCREAMING_SNAKE_CASE : Tuple = bms.bad_character_heuristic() if len(positions) == 0: print('''No match found''') else: print('''Pattern found in following positions: ''') print(positions)
317
1
"""simple docstring""" from bisect import bisect from itertools import accumulate def __UpperCAmelCase ( snake_case_ : List[str] , snake_case_ : str , snake_case_ : Dict , snake_case_ : Tuple ) -> Dict: """simple docstring""" _lowerCAmelCase = sorted(zip(snake_case_ , snake_case_ ) , key=lambda snake_case_ : x[0] / x[1] , reverse=snake_case_ ) _lowerCAmelCase , _lowerCAmelCase = [i[0] for i in r], [i[1] for i in r] _lowerCAmelCase = list(accumulate(snake_case_ ) ) _lowerCAmelCase = bisect(snake_case_ , snake_case_ ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
317
"""simple docstring""" import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device SCREAMING_SNAKE_CASE : List[str] = False class __lowerCamelCase ( unittest.TestCase ): pass @slow @require_torch_gpu class __lowerCamelCase ( unittest.TestCase ): def A__ (self ): '''simple docstring''' _lowerCAmelCase = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) _lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe( image=lowerCamelCase , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images _lowerCAmelCase = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _lowerCAmelCase = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
317
1
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : Any ) -> List[str]: """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = [], [] while len(snake_case_ ) > 1: _lowerCAmelCase , _lowerCAmelCase = min(snake_case_ ), max(snake_case_ ) start.append(snake_case_ ) end.append(snake_case_ ) collection.remove(snake_case_ ) collection.remove(snake_case_ ) end.reverse() return start + collection + end if __name__ == "__main__": SCREAMING_SNAKE_CASE : Tuple = input('''Enter numbers separated by a comma:\n''').strip() SCREAMING_SNAKE_CASE : Any = [int(item) for item in user_input.split(''',''')] print(*merge_sort(unsorted), sep=''',''')
317
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __lowerCamelCase ( __lowercase , unittest.TestCase ): __UpperCamelCase = DiTPipeline __UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS __UpperCamelCase = PipelineTesterMixin.required_optional_params - { 'latents', 'num_images_per_prompt', 'callback', 'callback_steps', } __UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS __UpperCamelCase = False def A__ (self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCamelCase , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1_000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=lowerCamelCase , ) _lowerCAmelCase = AutoencoderKL() _lowerCAmelCase = DDIMScheduler() _lowerCAmelCase = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler} return components def A__ (self , lowerCamelCase , lowerCamelCase=0 ): '''simple docstring''' if str(lowerCamelCase ).startswith("""mps""" ): _lowerCAmelCase = torch.manual_seed(lowerCamelCase ) else: _lowerCAmelCase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) _lowerCAmelCase = { """class_labels""": [1], """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def A__ (self ): '''simple docstring''' _lowerCAmelCase = """cpu""" _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = self.pipeline_class(**lowerCamelCase ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) _lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase ) _lowerCAmelCase = pipe(**lowerCamelCase ).images _lowerCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _lowerCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) _lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCamelCase , 1e-3 ) def A__ (self ): '''simple docstring''' self._test_inference_batch_single_identical(relax_max_difference=lowerCamelCase , expected_max_diff=1e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def A__ (self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @require_torch_gpu @slow class __lowerCamelCase ( unittest.TestCase ): def A__ (self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ (self ): '''simple docstring''' _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" ) pipe.to("""cuda""" ) _lowerCAmelCase = ["""vase""", """umbrella""", """white shark""", """white wolf"""] _lowerCAmelCase = pipe.get_label_ids(lowerCamelCase ) 
_lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=40 , output_type="""np""" ).images for word, image in zip(lowerCamelCase , lowerCamelCase ): _lowerCAmelCase = load_numpy( f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" ) assert np.abs((expected_image - image).max() ) < 1e-2 def A__ (self ): '''simple docstring''' _lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" ) _lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("""cuda""" ) _lowerCAmelCase = ["""vase""", """umbrella"""] _lowerCAmelCase = pipe.get_label_ids(lowerCamelCase ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=25 , output_type="""np""" ).images for word, image in zip(lowerCamelCase , lowerCamelCase ): _lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" f"""/dit/{word}_512.npy""" ) assert np.abs((expected_image - image).max() ) < 1e-1
317
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule SCREAMING_SNAKE_CASE : List[str] = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']} if TYPE_CHECKING: from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer else: import sys SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
317
"""simple docstring""" from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> Dict: """simple docstring""" return getitem, k def __UpperCAmelCase ( snake_case_ : Dict , snake_case_ : Union[str, Any] ) -> List[Any]: """simple docstring""" return setitem, k, v def __UpperCAmelCase ( snake_case_ : str ) -> Optional[int]: """simple docstring""" return delitem, k def __UpperCAmelCase ( snake_case_ : Optional[Any] , snake_case_ : Tuple , *snake_case_ : Tuple ) -> str: """simple docstring""" try: return fun(snake_case_ , *snake_case_ ), None except Exception as e: return None, e SCREAMING_SNAKE_CASE : int = ( _set('''key_a''', '''val_a'''), _set('''key_b''', '''val_b'''), ) SCREAMING_SNAKE_CASE : List[Any] = [ _set('''key_a''', '''val_a'''), _set('''key_a''', '''val_b'''), ] SCREAMING_SNAKE_CASE : Any = [ _set('''key_a''', '''val_a'''), _set('''key_b''', '''val_b'''), _del('''key_a'''), _del('''key_b'''), _set('''key_a''', '''val_a'''), _del('''key_a'''), ] SCREAMING_SNAKE_CASE : Union[str, Any] = [ _get('''key_a'''), _del('''key_a'''), _set('''key_a''', '''val_a'''), _del('''key_a'''), _del('''key_a'''), _get('''key_a'''), ] SCREAMING_SNAKE_CASE : Optional[Any] = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] SCREAMING_SNAKE_CASE : Optional[int] = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set('''key_a''', '''val_b'''), ] @pytest.mark.parametrize( """operations""" , ( pytest.param(_add_items , id="""add items""" ), pytest.param(_overwrite_items , id="""overwrite items""" ), pytest.param(_delete_items , id="""delete items""" ), pytest.param(_access_absent_items , id="""access absent items""" ), pytest.param(_add_with_resize_up , id="""add with resize up""" ), pytest.param(_add_with_resize_down , id="""add with resize down""" ), ) , ) def __UpperCAmelCase ( snake_case_ : List[Any] ) -> Tuple: """simple docstring""" _lowerCAmelCase = HashMap(initial_block_size=4 ) _lowerCAmelCase = {} for _, (fun, *args) in enumerate(snake_case_ ): _lowerCAmelCase , _lowerCAmelCase = _run_operation(snake_case_ , snake_case_ , *snake_case_ ) _lowerCAmelCase , _lowerCAmelCase = _run_operation(snake_case_ , snake_case_ , *snake_case_ ) assert my_res == py_res assert str(snake_case_ ) == str(snake_case_ ) assert set(snake_case_ ) == set(snake_case_ ) assert len(snake_case_ ) == len(snake_case_ ) assert set(my.items() ) == set(py.items() ) def __UpperCAmelCase ( ) -> Tuple: """simple docstring""" def is_public(snake_case_ : str ) -> bool: return not name.startswith("""_""" ) _lowerCAmelCase = {name for name in dir({} ) if is_public(snake_case_ )} _lowerCAmelCase = {name for name in dir(HashMap() ) if is_public(snake_case_ )} assert dict_public_names > hash_public_names
317
1
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : int = 50 ) -> int: """simple docstring""" _lowerCAmelCase = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(F'{solution() = }')
350
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int: """simple docstring""" def count_of_possible_combinations(snake_case_ : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(snake_case_ ) def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int: """simple docstring""" def count_of_possible_combinations_with_dp_array( snake_case_ : int , snake_case_ : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] _lowerCAmelCase = sum( count_of_possible_combinations_with_dp_array(target - item , snake_case_ ) for item in array ) _lowerCAmelCase = answer return answer _lowerCAmelCase = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(snake_case_ , snake_case_ ) def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int: """simple docstring""" _lowerCAmelCase = [0] * (target + 1) _lowerCAmelCase = 1 for i in range(1 , target + 1 ): for j in range(snake_case_ ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE : Tuple = 3 SCREAMING_SNAKE_CASE : Any = 5 SCREAMING_SNAKE_CASE : Optional[int] = [1, 2, 5] print(combination_sum_iv(n, array, target))
317
0
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : List[Any] = {'''tokenizer_file''': '''tokenizer.json'''} SCREAMING_SNAKE_CASE : Tuple = { '''tokenizer_file''': { '''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''', '''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''', '''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''', '''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''', '''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''', '''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''', '''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''', }, } class __lowerCamelCase ( A__ ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = ["input_ids", "attention_mask"] __UpperCamelCase = None def __init__(self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<unk>" , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="<pad>" , lowerCamelCase=False , lowerCamelCase=False , **lowerCamelCase , ): '''simple docstring''' super().__init__( __A , __A , tokenizer_file=__A , unk_token=__A , bos_token=__A , eos_token=__A , pad_token=__A , add_prefix_space=__A , clean_up_tokenization_spaces=__A , **__A , ) _lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , __A ) != add_prefix_space: _lowerCAmelCase = getattr(__A , pre_tok_state.pop("""type""" ) ) _lowerCAmelCase = add_prefix_space _lowerCAmelCase = pre_tok_class(**__A ) _lowerCAmelCase = add_prefix_space def A__ (self , *lowerCamelCase , **lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = kwargs.get("""is_split_into_words""" , __A ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with""" """ pretokenized inputs.""" ) return super()._batch_encode_plus(*__A , **__A ) def A__ (self , *lowerCamelCase , **lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = kwargs.get("""is_split_into_words""" , __A ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with""" """ pretokenized inputs.""" ) return super()._encode_plus(*__A , **__A ) def A__ (self , lowerCamelCase , lowerCamelCase = None ): '''simple docstring''' _lowerCAmelCase = self._tokenizer.model.save(__A , name=__A ) return tuple(__A ) def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__A , add_special_tokens=__A ) + [self.eos_token_id] ) if len(__A ) > self.model_max_length: _lowerCAmelCase = input_ids[-self.model_max_length :] return input_ids
351
"""simple docstring""" from __future__ import annotations import string from itertools import cycle, product from pathlib import Path SCREAMING_SNAKE_CASE : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) SCREAMING_SNAKE_CASE : list[int] = [ord(letter) for letter in string.ascii_lowercase] SCREAMING_SNAKE_CASE : set[int] = {ord(char) for char in VALID_CHARS} SCREAMING_SNAKE_CASE : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def __UpperCAmelCase ( snake_case_ : list[int] , snake_case_ : tuple[int, ...] ) -> str | None: """simple docstring""" _lowerCAmelCase = "" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 for keychar, cipherchar in zip(cycle(snake_case_ ) , snake_case_ ): _lowerCAmelCase = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(snake_case_ ) return decoded def __UpperCAmelCase ( snake_case_ : list[int] ) -> list[str]: """simple docstring""" _lowerCAmelCase = [] for key in product(snake_case_ , repeat=3 ): _lowerCAmelCase = try_key(snake_case_ , snake_case_ ) if encoded is not None: possibles.append(snake_case_ ) return possibles def __UpperCAmelCase ( snake_case_ : list[str] , snake_case_ : str ) -> list[str]: """simple docstring""" return [possible for possible in possibles if common_word in possible.lower()] def __UpperCAmelCase ( snake_case_ : str = "p059_cipher.txt" ) -> int: """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = Path(snake_case_ ).parent.joinpath(snake_case_ ).read_text(encoding="""utf-8""" ) _lowerCAmelCase = [int(snake_case_ ) for number in data.strip().split(""",""" )] _lowerCAmelCase = filter_valid_chars(snake_case_ ) for common_word in COMMON_WORDS: _lowerCAmelCase = filter_common_word(snake_case_ , snake_case_ ) if len(snake_case_ ) == 1: break _lowerCAmelCase = possibles[0] return sum(ord(snake_case_ ) for char in decoded_text ) if __name__ == "__main__": print(F'{solution() = }')
317
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE : Any = { '''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''], '''processing_mgp_str''': ['''MgpstrProcessor'''], '''tokenization_mgp_str''': ['''MgpstrTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Optional[int] = [ '''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MgpstrModel''', '''MgpstrPreTrainedModel''', '''MgpstrForSceneTextRecognition''', ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
352
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : int = 1000000 ) -> int: """simple docstring""" _lowerCAmelCase = limit + 1 _lowerCAmelCase = [0] * limit for first_term in range(1 , snake_case_ ): for n in range(snake_case_ , snake_case_ , snake_case_ ): _lowerCAmelCase = first_term + n / first_term if common_difference % 4: # d must be divisble by 4 continue else: common_difference /= 4 if ( first_term > common_difference and first_term < 4 * common_difference ): # since x,y,z are positive integers frequency[n] += 1 # so z>0 and a>d ,also 4d<a _lowerCAmelCase = sum(1 for x in frequency[1:limit] if x == 10 ) return count if __name__ == "__main__": print(F'{solution() = }')
317
0
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class __lowerCamelCase ( A__ ): __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
353
"""simple docstring""" from functools import reduce SCREAMING_SNAKE_CASE : int = ( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715693290963295227443043557''' '''66896648950445244523161731856403098711121722383113''' '''62229893423380308135336276614282806444486645238749''' '''30358907296290491560440772390713810515859307960866''' '''70172427121883998797908792274921901699720888093776''' '''65727333001053367881220235421809751254540594752243''' '''52584907711670556013604839586446706324415722155397''' '''53697817977846174064955149290862569321978468622482''' '''83972241375657056057490261407972968652414535100474''' '''82166370484403199890008895243450658541227588666881''' '''16427171479924442928230863465674813919123162824586''' '''17866458359124566529476545682848912883142607690042''' '''24219022671055626321111109370544217506941658960408''' '''07198403850962455444362981230987879927244284909188''' '''84580156166097919133875499200524063689912560717606''' '''05886116467109405077541002256983155200055935729725''' '''71636269561882670428252483600823257530420752963450''' ) def __UpperCAmelCase ( snake_case_ : str = N ) -> int: """simple docstring""" return max( # mypy cannot properly interpret reduce int(reduce(lambda snake_case_ , snake_case_ : str(int(snake_case_ ) * int(snake_case_ ) ) , n[i : i + 13] ) ) for i in range(len(snake_case_ ) - 12 ) ) if __name__ == "__main__": print(F'{solution() = }')
317
0
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : list ) -> list: """simple docstring""" if len(lowerCamelCase__ ) <= 1: return [tuple(lowerCamelCase__ )] _lowerCAmelCase = [] def generate(snake_case_ : int , snake_case_ : list ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 , lowerCamelCase__ ) for i in range(k - 1 ): if k % 2 == 0: # k is even _lowerCAmelCase = arr[k - 1], arr[i] else: # k is odd _lowerCAmelCase = arr[k - 1], arr[0] generate(k - 1 , lowerCamelCase__ ) generate(len(lowerCamelCase__ ) , lowerCamelCase__ ) return res if __name__ == "__main__": __SCREAMING_SNAKE_CASE : str = input('''Enter numbers separated by a comma:\n''').strip() __SCREAMING_SNAKE_CASE : str = [int(item) for item in user_input.split(''',''')] print(heaps(arr))
354
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : int = 600851475143 ) -> int: """simple docstring""" try: _lowerCAmelCase = int(snake_case_ ) except (TypeError, ValueError): raise TypeError("""Parameter n must be int or castable to int.""" ) if n <= 0: raise ValueError("""Parameter n must be greater than or equal to one.""" ) _lowerCAmelCase = 1 _lowerCAmelCase = 2 while i * i <= n: while n % i == 0: _lowerCAmelCase = i n //= i i += 1 if n > 1: _lowerCAmelCase = n return int(snake_case_ ) if __name__ == "__main__": print(F'{solution() = }')
317
0
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available SCREAMING_SNAKE_CASE : List[Any] = { '''configuration_efficientnet''': [ '''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''EfficientNetConfig''', '''EfficientNetOnnxConfig''', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Dict = ['''EfficientNetImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Optional[Any] = [ '''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''EfficientNetForImageClassification''', '''EfficientNetModel''', '''EfficientNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_efficientnet import ( EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientNetConfig, EfficientNetOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientnet import EfficientNetImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientnet import ( EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientNetForImageClassification, EfficientNetModel, EfficientNetPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
355
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__) @dataclass class __lowerCamelCase : __UpperCamelCase = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) __UpperCamelCase = field( default=__lowercase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) __UpperCamelCase = field( default=__lowercase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) __UpperCamelCase = field( default=__lowercase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) __UpperCamelCase = field(default=__lowercase , metadata={'help': 'Whether tp freeze the encoder.'} ) __UpperCamelCase = field(default=__lowercase , metadata={'help': 'Whether to freeze the embeddings.'} ) @dataclass class __lowerCamelCase : __UpperCamelCase = field( metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} ) __UpperCamelCase = field( default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , ) __UpperCamelCase = field( default=1_024 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __UpperCamelCase = field( default=128 , metadata={ 'help': ( 'The maximum total sequence length for target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __UpperCamelCase = field( default=142 , metadata={ 'help': ( 'The maximum total sequence length for validation target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded. ' 'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used ' 'during ``evaluate`` and ``predict``.' ) } , ) __UpperCamelCase = field( default=142 , metadata={ 'help': ( 'The maximum total sequence length for test target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __UpperCamelCase = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} ) __UpperCamelCase = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} ) __UpperCamelCase = field(default=-1 , metadata={'help': '# test examples. 
-1 means use all.'} ) __UpperCamelCase = field(default=__lowercase , metadata={'help': 'Source language id for translation.'} ) __UpperCamelCase = field(default=__lowercase , metadata={'help': 'Target language id for translation.'} ) __UpperCamelCase = field(default=__lowercase , metadata={'help': '# num_beams to use for evaluation.'} ) __UpperCamelCase = field( default=__lowercase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , ) def __UpperCAmelCase ( snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Union[str, Any] ) -> Tuple: """simple docstring""" logger.info(F"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(F""" {key} = {metrics[key]}""" ) save_json(snake_case_ , os.path.join(snake_case_ , F"""{split}_results.json""" ) ) def __UpperCAmelCase ( ) -> Union[str, Any]: """simple docstring""" _lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_args_into_dataclasses() check_output_dir(snake_case_ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("""Training/evaluation parameters %s""" , snake_case_ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_lowerCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _lowerCAmelCase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""") for p in extra_model_params: if getattr(snake_case_ , snake_case_ , snake_case_ ): assert hasattr(snake_case_ , snake_case_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) ) _lowerCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(snake_case_ , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: _lowerCAmelCase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(snake_case_ , snake_case_ ): _lowerCAmelCase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: _lowerCAmelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(snake_case_ ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) _lowerCAmelCase = SeqaSeqDataset # Get datasets _lowerCAmelCase = ( dataset_class( snake_case_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_train else None ) _lowerCAmelCase = ( dataset_class( snake_case_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) _lowerCAmelCase = ( dataset_class( snake_case_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_predict else None ) # Initialize our Trainer _lowerCAmelCase = ( build_compute_metrics_fn(data_args.task , snake_case_ ) if training_args.predict_with_generate else None ) _lowerCAmelCase = SeqaSeqTrainer( model=snake_case_ , args=snake_case_ , data_args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , data_collator=SeqaSeqDataCollator( snake_case_ , snake_case_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case_ , tokenizer=snake_case_ , ) _lowerCAmelCase = {} # Training if training_args.do_train: logger.info("""*** Train ***""" ) _lowerCAmelCase = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) _lowerCAmelCase = train_result.metrics _lowerCAmelCase = data_args.n_train 
trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("""train""" , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) _lowerCAmelCase = trainer.evaluate(metric_key_prefix="""val""" ) _lowerCAmelCase = data_args.n_val _lowerCAmelCase = round(metrics["""val_loss"""] , 4 ) if trainer.is_world_process_zero(): handle_metrics("""val""" , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) if training_args.do_predict: logger.info("""*** Predict ***""" ) _lowerCAmelCase = trainer.predict(test_dataset=snake_case_ , metric_key_prefix="""test""" ) _lowerCAmelCase = test_output.metrics _lowerCAmelCase = data_args.n_test if trainer.is_world_process_zero(): _lowerCAmelCase = round(metrics["""test_loss"""] , 4 ) handle_metrics("""test""" , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) if training_args.predict_with_generate: _lowerCAmelCase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ ) _lowerCAmelCase = lmap(str.strip , snake_case_ ) write_txt_file(snake_case_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) ) if trainer.is_world_process_zero(): save_json(snake_case_ , os.path.join(training_args.output_dir , """all_results.json""" ) ) return all_metrics def __UpperCAmelCase ( snake_case_ : Any ) -> Dict: """simple docstring""" main() if __name__ == "__main__": main()
317
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE : List[str] = { """configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : List[str] = [ """PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""", """PegasusXForConditionalGeneration""", """PegasusXModel""", """PegasusXPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pegasus_x import ( PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
356
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE : List[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Union[str, Any] = [ '''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FocalNetForImageClassification''', '''FocalNetForMaskedImageModeling''', '''FocalNetBackbone''', '''FocalNetModel''', '''FocalNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
317
0
import argparse
import pathlib

import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version

from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
    raise Exception('''requires fairseq >= 1.0.0a''')

logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)

SCREAMING_SNAKE_CASE : List[str] = "Hello world! cécé herlolip"


def __UpperCAmelCase ( snake_case_ : str , snake_case_ : str , snake_case_ : bool ) -> str:
    """simple docstring"""
    _lowerCAmelCase = FairseqRobertaModel.from_pretrained(__A )
    roberta.eval()  # disable dropout
    _lowerCAmelCase = roberta.model.encoder.sentence_encoder
    _lowerCAmelCase = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
    if classification_head:
        _lowerCAmelCase = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]

    print("""Our RoBERTa config:""" , __A )

    _lowerCAmelCase = XLMRobertaXLForSequenceClassification(__A ) if classification_head else XLMRobertaXLForMaskedLM(__A )
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    _lowerCAmelCase = roberta_sent_encoder.embed_tokens.weight
    _lowerCAmelCase = roberta_sent_encoder.embed_positions.weight
    _lowerCAmelCase = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c RoBERTa doesn't use them.
    _lowerCAmelCase = roberta_sent_encoder.layer_norm.weight
    _lowerCAmelCase = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        _lowerCAmelCase = model.roberta.encoder.layer[i]
        _lowerCAmelCase = roberta_sent_encoder.layers[i]

        _lowerCAmelCase = layer.attention
        _lowerCAmelCase = roberta_layer.self_attn_layer_norm.weight
        _lowerCAmelCase = roberta_layer.self_attn_layer_norm.bias

        # self attention
        _lowerCAmelCase = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        )

        _lowerCAmelCase = roberta_layer.self_attn.q_proj.weight
        _lowerCAmelCase = roberta_layer.self_attn.q_proj.bias
        _lowerCAmelCase = roberta_layer.self_attn.k_proj.weight
        _lowerCAmelCase = roberta_layer.self_attn.k_proj.bias
        _lowerCAmelCase = roberta_layer.self_attn.v_proj.weight
        _lowerCAmelCase = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        _lowerCAmelCase = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        _lowerCAmelCase = roberta_layer.self_attn.out_proj.weight
        _lowerCAmelCase = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        _lowerCAmelCase = roberta_layer.final_layer_norm.weight
        _lowerCAmelCase = roberta_layer.final_layer_norm.bias

        # intermediate
        _lowerCAmelCase = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
        _lowerCAmelCase = roberta_layer.fca.weight
        _lowerCAmelCase = roberta_layer.fca.bias

        # output
        _lowerCAmelCase = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
        _lowerCAmelCase = roberta_layer.fca.weight
        _lowerCAmelCase = roberta_layer.fca.bias
        # end of layer

    if classification_head:
        _lowerCAmelCase = roberta.model.classification_heads["""mnli"""].dense.weight
        _lowerCAmelCase = roberta.model.classification_heads["""mnli"""].dense.bias
        _lowerCAmelCase = roberta.model.classification_heads["""mnli"""].out_proj.weight
        _lowerCAmelCase = roberta.model.classification_heads["""mnli"""].out_proj.bias
    else:
        # LM Head
        _lowerCAmelCase = roberta.model.encoder.lm_head.dense.weight
        _lowerCAmelCase = roberta.model.encoder.lm_head.dense.bias
        _lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.weight
        _lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.bias
        _lowerCAmelCase = roberta.model.encoder.lm_head.weight
        _lowerCAmelCase = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    _lowerCAmelCase = roberta.encode(__A ).unsqueeze(0 )  # batch of size 1

    _lowerCAmelCase = model(__A )[0]
    if classification_head:
        _lowerCAmelCase = roberta.model.classification_heads["""mnli"""](roberta.extract_features(__A ) )
    else:
        _lowerCAmelCase = roberta.model(__A )[0]
    print(our_output.shape , their_output.shape )
    _lowerCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F"""max_absolute_diff = {max_absolute_diff}""" )  # ~ 1e-7
    _lowerCAmelCase = torch.allclose(__A , __A , atol=1e-3 )
    print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
    if not success:
        raise Exception("""Something went wRoNg""" )

    pathlib.Path(__A ).mkdir(parents=__A , exist_ok=__A )
    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(__A )


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
    )
    SCREAMING_SNAKE_CASE : str = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
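# A hedged invocation sketch for the conversion script above:
# --roberta_checkpoint_path and --pytorch_dump_folder_path are the two required
# argparse flags registered in the __main__ block; the script filename and both
# paths below are placeholders, not values from the sample.
#
#   python convert_xlm_roberta_xl_checkpoint.py \
#       --roberta_checkpoint_path /path/to/fairseq/xlm-roberta-xl \
#       --pytorch_dump_folder_path ./xlm-roberta-xl-hf \
#       --classification_head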
357
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __lowerCamelCase ( unittest.TestCase ): def __init__(self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=18 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=None , ): '''simple docstring''' _lowerCAmelCase = size if size is not None else {"""shortest_edge""": 20} _lowerCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = num_channels _lowerCAmelCase = image_size _lowerCAmelCase = min_resolution _lowerCAmelCase = max_resolution _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = do_center_crop _lowerCAmelCase = crop_size def A__ (self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class __lowerCamelCase ( __lowercase , unittest.TestCase ): __UpperCamelCase = MobileNetVaImageProcessor if is_vision_available() else None def A__ (self ): '''simple docstring''' _lowerCAmelCase = MobileNetVaImageProcessingTester(self ) @property def A__ (self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase , """do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase , """size""" ) ) self.assertTrue(hasattr(lowerCamelCase , """do_center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase , """crop_size""" ) ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 20} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def A__ (self ): '''simple docstring''' pass def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , Image.Image ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, 
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,
        )

    def A__ (self ):
        '''simple docstring'''
        _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase , np.ndarray )

        # Test not batched input
        _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,
        )

        # Test batched
        _lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,
        )

    def A__ (self ):
        '''simple docstring'''
        _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase , torch.Tensor )

        # Test not batched input
        _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,
        )

        # Test batched
        _lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,
        )
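# A small, hedged illustration of the pipeline the tests above exercise: resize so
# the shortest edge is 20 px, then center-crop to 18x18. MobileNetVaImageProcessor
# is this sample's obfuscated spelling of transformers' MobileNetV1ImageProcessor,
# which is used here; the 32x32 solid-color input image is an arbitrary placeholder,
# and the expected shape assumes the processor's default do_resize/do_center_crop=True.
from PIL import Image

from transformers import MobileNetV1ImageProcessor

processor = MobileNetV1ImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
pixel_values = processor(images=Image.new("RGB", (32, 32)), return_tensors="pt").pixel_values
assert pixel_values.shape == (1, 3, 18, 18)  # (batch, channels, cropped height, cropped width)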
317
0
from itertools import product


def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int ) -> List[str]:
    """simple docstring"""
    _lowerCAmelCase = sides_number
    _lowerCAmelCase = max_face_number * dice_number
    _lowerCAmelCase = [0] * (max_total + 1)

    _lowerCAmelCase = 1
    _lowerCAmelCase = range(lowerCamelCase_ , max_face_number + 1 )
    for dice_numbers in product(lowerCamelCase_ , repeat=lowerCamelCase_ ):
        _lowerCAmelCase = sum(lowerCamelCase_ )
        totals_frequencies[total] += 1

    return totals_frequencies


def __UpperCAmelCase ( ) -> Union[str, Any]:
    """simple docstring"""
    _lowerCAmelCase = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    _lowerCAmelCase = total_frequency_distribution(
        sides_number=6 , dice_number=6 )

    _lowerCAmelCase = 0
    _lowerCAmelCase = 9
    _lowerCAmelCase = 4 * 9
    _lowerCAmelCase = 6
    for peter_total in range(lowerCamelCase_ , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )

    _lowerCAmelCase = (4**9) * (6**6)
    _lowerCAmelCase = peter_wins_count / total_games_number

    _lowerCAmelCase = round(lowerCamelCase_ , ndigits=7 )
    return rounded_peter_win_probability


if __name__ == "__main__":
    print(F'{solution() = }')
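# A sanity check of the frequency-distribution helper above, rewritten with the
# obfuscation undone — the readable names are inferred from the call sites and are
# not part of the sample itself.
from itertools import product


def total_frequency_distribution_check(sides_number: int, dice_number: int) -> list:
    # frequencies[t] = number of ways to roll total t with `dice_number` dice
    # of `sides_number` faces each.
    max_total = sides_number * dice_number
    frequencies = [0] * (max_total + 1)
    for roll in product(range(1, sides_number + 1), repeat=dice_number):
        frequencies[sum(roll)] += 1
    return frequencies


# Two four-sided dice: totals 2..8 occur 1, 2, 3, 4, 3, 2, 1 times (16 rolls in all).
assert total_frequency_distribution_check(4, 2)[2:] == [1, 2, 3, 4, 3, 2, 1]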
358
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : list ) -> list: """simple docstring""" for i in range(len(snake_case_ ) - 1 , 0 , -1 ): _lowerCAmelCase = False for j in range(snake_case_ , 0 , -1 ): if unsorted[j] < unsorted[j - 1]: _lowerCAmelCase , _lowerCAmelCase = unsorted[j - 1], unsorted[j] _lowerCAmelCase = True for j in range(snake_case_ ): if unsorted[j] > unsorted[j + 1]: _lowerCAmelCase , _lowerCAmelCase = unsorted[j + 1], unsorted[j] _lowerCAmelCase = True if not swapped: break return unsorted if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE : List[Any] = input('''Enter numbers separated by a comma:\n''').strip() SCREAMING_SNAKE_CASE : List[str] = [int(item) for item in user_input.split(''',''')] print(F'{cocktail_shaker_sort(unsorted) = }')
317
0