code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
"""simple docstring""" from __future__ import annotations from collections.abc import Generator def A__ ( ): A = {} A = 2 while True: A = factor_map.pop(UpperCamelCase , UpperCamelCase ) if factor: A = factor + prime while x in factor_map: x += factor A = factor else: A = prime yield prime prime += 1 def A__ ( UpperCamelCase = 1E10 ): A = sieve() A = 1 while True: A = next(UpperCamelCase ) if (2 * prime * n) > limit: return n # Ignore the next prime as the reminder will be 2. next(UpperCamelCase ) n += 2 if __name__ == "__main__": print(solution())
292
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case : Optional[int] = logging.get_logger(__name__) _snake_case : Optional[int] = { 'google/vivit-b-16x2-kinetics400': ( 'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json' ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = '''vivit''' def __init__( self :Optional[Any] , __UpperCamelCase :Dict=2_24 , __UpperCamelCase :int=32 , __UpperCamelCase :Union[str, Any]=[2, 16, 16] , __UpperCamelCase :Optional[Any]=3 , __UpperCamelCase :Optional[Any]=7_68 , __UpperCamelCase :Any=12 , __UpperCamelCase :List[str]=12 , __UpperCamelCase :List[str]=30_72 , __UpperCamelCase :Any="gelu_fast" , __UpperCamelCase :List[Any]=0.0 , __UpperCamelCase :str=0.0 , __UpperCamelCase :Dict=0.02 , __UpperCamelCase :Optional[Any]=1e-06 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ): A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = num_frames A = tubelet_size A = num_channels A = qkv_bias super().__init__(**__UpperCamelCase )
292
1
"""simple docstring""" import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = ['''image_processor''', '''tokenizer'''] UpperCamelCase = '''BlipImageProcessor''' UpperCamelCase = '''AutoTokenizer''' def __init__( self :Dict , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Dict , __UpperCamelCase :Any ): super().__init__(__UpperCamelCase , __UpperCamelCase ) # add QFormer tokenizer A = qformer_tokenizer def __call__( self :Any , __UpperCamelCase :ImageInput = None , __UpperCamelCase :Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __UpperCamelCase :bool = True , __UpperCamelCase :Union[bool, str, PaddingStrategy] = False , __UpperCamelCase :Union[bool, str, TruncationStrategy] = None , __UpperCamelCase :Optional[int] = None , __UpperCamelCase :int = 0 , __UpperCamelCase :Optional[int] = None , __UpperCamelCase :Optional[bool] = None , __UpperCamelCase :bool = False , __UpperCamelCase :bool = False , __UpperCamelCase :bool = False , __UpperCamelCase :bool = False , __UpperCamelCase :bool = False , __UpperCamelCase :bool = True , __UpperCamelCase :Optional[Union[str, TensorType]] = None , **__UpperCamelCase :List[str] , ): if images is None and text is None: raise ValueError("You have to specify at least images or text." 
) A = BatchFeature() if text is not None: A = self.tokenizer( text=__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , stride=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_overflowing_tokens=__UpperCamelCase , return_special_tokens_mask=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_length=__UpperCamelCase , verbose=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , ) encoding.update(__UpperCamelCase ) A = self.qformer_tokenizer( text=__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , stride=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_overflowing_tokens=__UpperCamelCase , return_special_tokens_mask=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_length=__UpperCamelCase , verbose=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , ) A = qformer_text_encoding.pop("input_ids" ) A = qformer_text_encoding.pop("attention_mask" ) if images is not None: A = self.image_processor(__UpperCamelCase , return_tensors=__UpperCamelCase ) encoding.update(__UpperCamelCase ) return encoding def lowerCamelCase ( self :Any , *__UpperCamelCase :List[Any] , **__UpperCamelCase :str ): return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase ) def lowerCamelCase ( self :Union[str, Any] , *__UpperCamelCase :List[Any] , **__UpperCamelCase :Tuple ): return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def lowerCamelCase ( self :Optional[Any] ): A = self.tokenizer.model_input_names A = 
self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def lowerCamelCase ( self :List[str] , __UpperCamelCase :Tuple , **__UpperCamelCase :List[Any] ): if os.path.isfile(__UpperCamelCase ): raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file" ) os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) A = os.path.join(__UpperCamelCase , "qformer_tokenizer" ) self.qformer_tokenizer.save_pretrained(__UpperCamelCase ) return super().save_pretrained(__UpperCamelCase , **__UpperCamelCase ) @classmethod def lowerCamelCase ( cls :Tuple , __UpperCamelCase :Dict , **__UpperCamelCase :Tuple ): A = AutoTokenizer.from_pretrained(__UpperCamelCase , subfolder="qformer_tokenizer" ) A = cls._get_arguments_from_pretrained(__UpperCamelCase , **__UpperCamelCase ) args.append(__UpperCamelCase ) return cls(*__UpperCamelCase )
292
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _UpperCAmelCase ( lowercase_ , unittest.TestCase ): UpperCamelCase = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Union[str, Any]=0 ): A = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(__UpperCamelCase ) ) A = np.random.RandomState(__UpperCamelCase ) A = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def lowerCamelCase ( self :Any ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Dict ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert 
image.shape == (1, 1_28, 1_28, 3) A = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Optional[Any] ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) # warmup pass to apply optimizations A = pipe(**self.get_dummy_inputs() ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Dict ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Optional[Any] ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - 
expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Union[str, Any] ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): @property def lowerCamelCase ( self :Optional[Any] ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowerCamelCase ( self :Optional[int] ): A = ort.SessionOptions() A = False return options def lowerCamelCase ( self :Dict ): A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) A = init_image.resize((7_68, 5_12) ) # using the PNDM scheduler by default A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = "A fantasy landscape, trending on artstation" A = np.random.RandomState(0 ) A = pipe( prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , ) A = output.images A = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) A = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] ) # 
TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def lowerCamelCase ( self :Any ): A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) A = init_image.resize((7_68, 5_12) ) A = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" ) A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = "A fantasy landscape, trending on artstation" A = np.random.RandomState(0 ) A = pipe( prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , ) A = output.images A = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) A = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
292
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_owlvit import OwlViTImageProcessor _snake_case : Optional[int] = logging.get_logger(__name__) class _UpperCAmelCase ( lowercase_ ): def __init__( self :Optional[Any] , *__UpperCamelCase :Dict , **__UpperCamelCase :Tuple ): warnings.warn( "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use OwlViTImageProcessor instead." , __UpperCamelCase , ) super().__init__(*__UpperCamelCase , **__UpperCamelCase )
292
"""simple docstring""" def A__ ( UpperCamelCase ): A = generate_pascal_triangle(UpperCamelCase ) for row_idx in range(UpperCamelCase ): # Print left spaces for _ in range(num_rows - row_idx - 1 ): print(end=" " ) # Print row values for col_idx in range(row_idx + 1 ): if col_idx != row_idx: print(triangle[row_idx][col_idx] , end=" " ) else: print(triangle[row_idx][col_idx] , end="" ) print() def A__ ( UpperCamelCase ): if not isinstance(UpperCamelCase , UpperCamelCase ): raise TypeError("The input value of 'num_rows' should be 'int'" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( "The input value of 'num_rows' should be greater than or equal to 0" ) A = [] for current_row_idx in range(UpperCamelCase ): A = populate_current_row(UpperCamelCase , UpperCamelCase ) triangle.append(UpperCamelCase ) return triangle def A__ ( UpperCamelCase , UpperCamelCase ): A = [-1] * (current_row_idx + 1) # first and last elements of current row are equal to 1 A, A = 1, 1 for current_col_idx in range(1 , UpperCamelCase ): calculate_current_element( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) return current_row def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ): A = triangle[current_row_idx - 1][current_col_idx - 1] A = triangle[current_row_idx - 1][current_col_idx] A = above_to_left_elt + above_to_right_elt def A__ ( UpperCamelCase ): if not isinstance(UpperCamelCase , UpperCamelCase ): raise TypeError("The input value of 'num_rows' should be 'int'" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( "The input value of 'num_rows' should be greater than or equal to 0" ) A = [[1]] for row_index in range(1 , UpperCamelCase ): A = [0] + result[-1] + [0] A = row_index + 1 # Calculate the number of distinct elements in a row A = sum(divmod(UpperCamelCase , 2 ) ) A = [ temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 ) ] A = row_first_half[: (row_index + 1) // 2] 
row_second_half.reverse() A = row_first_half + row_second_half result.append(UpperCamelCase ) return result def A__ ( ): from collections.abc import Callable from timeit import timeit def benchmark_a_function(UpperCamelCase , UpperCamelCase ) -> None: A = F"{func.__name__}({value})" A = timeit(F"__main__.{call}" , setup="import __main__" ) # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds") print(F"{call:38} -- {timing:.4f} seconds" ) for value in range(15 ): # (1, 7, 14): for func in (generate_pascal_triangle, generate_pascal_triangle_optimized): benchmark_a_function(UpperCamelCase , UpperCamelCase ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
292
1
"""simple docstring""" from functools import lru_cache @lru_cache def A__ ( UpperCamelCase ): if num < 0: raise ValueError("Number should not be negative." ) return 1 if num in (0, 1) else num * factorial(num - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
292
"""simple docstring""" import math import sys def A__ ( UpperCamelCase ): A = "" try: with open(UpperCamelCase , "rb" ) as binary_file: A = binary_file.read() for dat in data: A = F"{dat:08b}" result += curr_byte return result except OSError: print("File not accessible" ) sys.exit() def A__ ( UpperCamelCase ): A = {"0": "0", "1": "1"} A, A = "", "" A = len(UpperCamelCase ) for i in range(len(UpperCamelCase ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue A = lexicon[curr_string] result += last_match_id A = last_match_id + "0" if math.loga(UpperCamelCase ).is_integer(): A = {} for curr_key in list(UpperCamelCase ): A = lexicon.pop(UpperCamelCase ) A = new_lex A = last_match_id + "1" index += 1 A = "" return result def A__ ( UpperCamelCase , UpperCamelCase ): A = 8 try: with open(UpperCamelCase , "wb" ) as opened_file: A = [ to_write[i : i + byte_length] for i in range(0 , len(UpperCamelCase ) , UpperCamelCase ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append("10000000" ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(UpperCamelCase , 2 ).to_bytes(1 , byteorder="big" ) ) except OSError: print("File not accessible" ) sys.exit() def A__ ( UpperCamelCase ): A = 0 for letter in data_bits: if letter == "1": break counter += 1 A = data_bits[counter:] A = data_bits[counter + 1 :] return data_bits def A__ ( UpperCamelCase , UpperCamelCase ): A = read_file_binary(UpperCamelCase ) A = remove_prefix(UpperCamelCase ) A = decompress_data(UpperCamelCase ) write_file_binary(UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
292
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor _snake_case : Tuple = logging.get_logger(__name__) class _UpperCAmelCase ( lowercase_ ): def __init__( self :Optional[Any] , *__UpperCamelCase :Dict , **__UpperCamelCase :Optional[int] ): warnings.warn( "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use ChineseCLIPImageProcessor instead." , __UpperCamelCase , ) super().__init__(*__UpperCamelCase , **__UpperCamelCase )
292
"""simple docstring""" class _UpperCAmelCase : def __init__( self :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Tuple ): A = name A = val def __str__( self :str ): return f"{self.__class__.__name__}({self.name}, {self.val})" def __lt__( self :List[Any] , __UpperCamelCase :Union[str, Any] ): return self.val < other.val class _UpperCAmelCase : def __init__( self :List[str] , __UpperCamelCase :Optional[Any] ): A = {} A = {} A = self.build_heap(__UpperCamelCase ) def __getitem__( self :int , __UpperCamelCase :Optional[int] ): return self.get_value(__UpperCamelCase ) def lowerCamelCase ( self :List[Any] , __UpperCamelCase :str ): return (idx - 1) // 2 def lowerCamelCase ( self :int , __UpperCamelCase :Optional[Any] ): return idx * 2 + 1 def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Optional[int] ): return idx * 2 + 2 def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :str ): return self.heap_dict[key] def lowerCamelCase ( self :int , __UpperCamelCase :Optional[Any] ): A = len(__UpperCamelCase ) - 1 A = self.get_parent_idx(__UpperCamelCase ) for idx, i in enumerate(__UpperCamelCase ): A = idx A = i.val for i in range(__UpperCamelCase , -1 , -1 ): self.sift_down(__UpperCamelCase , __UpperCamelCase ) return array def lowerCamelCase ( self :str , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Dict ): while True: A = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741 A = self.get_right_child_idx(__UpperCamelCase ) A = idx if l < len(__UpperCamelCase ) and array[l] < array[idx]: A = l if r < len(__UpperCamelCase ) and array[r] < array[smallest]: A = r if smallest != idx: A, A = array[smallest], array[idx] ( ( A ), ( A ), ) = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) A = smallest else: break def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Optional[int] ): A = self.get_parent_idx(__UpperCamelCase ) while p >= 0 and self.heap[p] > self.heap[idx]: A, A = self.heap[idx], 
self.heap[p] A, A = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) A = p A = self.get_parent_idx(__UpperCamelCase ) def lowerCamelCase ( self :Any ): return self.heap[0] def lowerCamelCase ( self :Tuple ): A, A = self.heap[-1], self.heap[0] A, A = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) A = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 , self.heap ) return x def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Optional[int] ): self.heap.append(__UpperCamelCase ) A = len(self.heap ) - 1 A = node.val self.sift_up(len(self.heap ) - 1 ) def lowerCamelCase ( self :Tuple ): return len(self.heap ) == 0 def lowerCamelCase ( self :Any , __UpperCamelCase :str , __UpperCamelCase :Dict ): assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" A = new_value A = new_value self.sift_up(self.idx_of_element[node] ) _snake_case : Optional[int] = Node('R', -1) _snake_case : Tuple = Node('B', 6) _snake_case : Tuple = Node('A', 3) _snake_case : Optional[int] = Node('X', 1) _snake_case : List[Any] = Node('E', 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array _snake_case : Tuple = MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print('Min Heap - before decrease key') for i in my_min_heap.heap: print(i) print('Min Heap - After decrease key of node [B -> -17]') my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
292
1
"""simple docstring""" def A__ ( UpperCamelCase , UpperCamelCase ): if a < 0 or b < 0: raise ValueError("the value of both inputs must be positive" ) A = str(bin(UpperCamelCase ) )[2:] # remove the leading "0b" A = str(bin(UpperCamelCase ) )[2:] # remove the leading "0b" A = max(len(UpperCamelCase ) , len(UpperCamelCase ) ) return "0b" + "".join( str(int(char_a == "1" and char_b == "1" ) ) for char_a, char_b in zip(a_binary.zfill(UpperCamelCase ) , b_binary.zfill(UpperCamelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
292
"""simple docstring""" from __future__ import annotations _snake_case : str = [] def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): for i in range(len(UpperCamelCase ) ): if board[row][i] == 1: return False for i in range(len(UpperCamelCase ) ): if board[i][column] == 1: return False for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , -1 , -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , len(UpperCamelCase ) ) ): if board[i][j] == 1: return False return True def A__ ( UpperCamelCase , UpperCamelCase ): if row >= len(UpperCamelCase ): solution.append(UpperCamelCase ) printboard(UpperCamelCase ) print() return True for i in range(len(UpperCamelCase ) ): if is_safe(UpperCamelCase , UpperCamelCase , UpperCamelCase ): A = 1 solve(UpperCamelCase , row + 1 ) A = 0 return False def A__ ( UpperCamelCase ): for i in range(len(UpperCamelCase ) ): for j in range(len(UpperCamelCase ) ): if board[i][j] == 1: print("Q" , end=" " ) else: print("." , end=" " ) print() # n=int(input("The no. of queens")) _snake_case : List[str] = 8 _snake_case : List[str] = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print('The total no. of solutions are :', len(solution))
292
1
"""simple docstring""" class _UpperCAmelCase : def __init__( self :Optional[Any] , __UpperCamelCase :list ): A = set_counts A = max(__UpperCamelCase ) A = len(__UpperCamelCase ) A = [1] * num_sets A = list(range(__UpperCamelCase ) ) def lowerCamelCase ( self :str , __UpperCamelCase :int , __UpperCamelCase :int ): A = self.get_parent(__UpperCamelCase ) A = self.get_parent(__UpperCamelCase ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] A = 0 A = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 A = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] A = 0 A = src_parent A = self.set_counts[src_parent] A = max(self.max_set , __UpperCamelCase ) return True def lowerCamelCase ( self :str , __UpperCamelCase :int ): if self.parents[disj_set] == disj_set: return disj_set A = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
292
"""simple docstring""" import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class _UpperCAmelCase : @staticmethod def lowerCamelCase ( *__UpperCamelCase :List[Any] , **__UpperCamelCase :List[Any] ): pass def A__ ( UpperCamelCase ): A = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class _UpperCAmelCase ( unittest.TestCase ): UpperCamelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] ): A = DepthEstimationPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase ( self :Dict , __UpperCamelCase :Optional[int] , __UpperCamelCase :Optional[Any] ): A = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" ) self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , __UpperCamelCase ) import datasets A = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" ) A = depth_estimator( [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] ) self.assertEqual( [ {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), 
"depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, ] , __UpperCamelCase , ) @require_tf @unittest.skip("Depth estimation is not implemented in TF" ) def lowerCamelCase ( self :Optional[Any] ): pass @slow @require_torch def lowerCamelCase ( self :Optional[Any] ): A = "Intel/dpt-large" A = pipeline("depth-estimation" , model=__UpperCamelCase ) A = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" ) A = hashimage(outputs["depth"] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 ) @require_torch def lowerCamelCase ( self :Optional[Any] ): # This is highly irregular to have no small tests. self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
292
1
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class _UpperCAmelCase ( unittest.TestCase ): UpperCamelCase = MODEL_FOR_CAUSAL_LM_MAPPING UpperCamelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def lowerCamelCase ( self :Optional[int] ): A = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt" ) # Using `do_sample=False` to force deterministic output A = text_generator("This is a test" , do_sample=__UpperCamelCase ) self.assertEqual( __UpperCamelCase , [ { "generated_text": ( "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope." " oscope. FiliFili@@" ) } ] , ) A = text_generator(["This is a test", "This is a second test"] ) self.assertEqual( __UpperCamelCase , [ [ { "generated_text": ( "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope." " oscope. FiliFili@@" ) } ], [ { "generated_text": ( "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy" " oscope. oscope. 
FiliFili@@" ) } ], ] , ) A = text_generator("This is a test" , do_sample=__UpperCamelCase , num_return_sequences=2 , return_tensors=__UpperCamelCase ) self.assertEqual( __UpperCamelCase , [ {"generated_token_ids": ANY(__UpperCamelCase )}, {"generated_token_ids": ANY(__UpperCamelCase )}, ] , ) A = text_generator.model.config.eos_token_id A = "<pad>" A = text_generator( ["This is a test", "This is a second test"] , do_sample=__UpperCamelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCamelCase , ) self.assertEqual( __UpperCamelCase , [ [ {"generated_token_ids": ANY(__UpperCamelCase )}, {"generated_token_ids": ANY(__UpperCamelCase )}, ], [ {"generated_token_ids": ANY(__UpperCamelCase )}, {"generated_token_ids": ANY(__UpperCamelCase )}, ], ] , ) @require_tf def lowerCamelCase ( self :List[Any] ): A = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf" ) # Using `do_sample=False` to force deterministic output A = text_generator("This is a test" , do_sample=__UpperCamelCase ) self.assertEqual( __UpperCamelCase , [ { "generated_text": ( "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵" " please," ) } ] , ) A = text_generator(["This is a test", "This is a second test"] , do_sample=__UpperCamelCase ) self.assertEqual( __UpperCamelCase , [ [ { "generated_text": ( "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵" " please," ) } ], [ { "generated_text": ( "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes" " Cannes 閲閲Cannes Cannes Cannes 攵 please," ) } ], ] , ) def lowerCamelCase ( self :str , __UpperCamelCase :Any , __UpperCamelCase :List[str] , __UpperCamelCase :List[str] ): A = TextGenerationPipeline(model=__UpperCamelCase , tokenizer=__UpperCamelCase ) return text_generator, ["This is a test", "Another test"] def lowerCamelCase ( self :List[str] ): A = "Hello I believe in" A = pipeline("text-generation" , 
model="hf-internal-testing/tiny-random-gpt2" ) A = text_generator(__UpperCamelCase ) self.assertEqual( __UpperCamelCase , [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}] , ) A = text_generator(__UpperCamelCase , stop_sequence=" fe" ) self.assertEqual(__UpperCamelCase , [{"generated_text": "Hello I believe in fe"}] ) def lowerCamelCase ( self :Any , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Optional[int] ): A = text_generator.model A = text_generator.tokenizer A = text_generator("This is a test" ) self.assertEqual(__UpperCamelCase , [{"generated_text": ANY(__UpperCamelCase )}] ) self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) ) A = text_generator("This is a test" , return_full_text=__UpperCamelCase ) self.assertEqual(__UpperCamelCase , [{"generated_text": ANY(__UpperCamelCase )}] ) self.assertNotIn("This is a test" , outputs[0]["generated_text"] ) A = pipeline(task="text-generation" , model=__UpperCamelCase , tokenizer=__UpperCamelCase , return_full_text=__UpperCamelCase ) A = text_generator("This is a test" ) self.assertEqual(__UpperCamelCase , [{"generated_text": ANY(__UpperCamelCase )}] ) self.assertNotIn("This is a test" , outputs[0]["generated_text"] ) A = text_generator("This is a test" , return_full_text=__UpperCamelCase ) self.assertEqual(__UpperCamelCase , [{"generated_text": ANY(__UpperCamelCase )}] ) self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) ) A = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=__UpperCamelCase ) self.assertEqual( __UpperCamelCase , [ [{"generated_text": ANY(__UpperCamelCase )}, {"generated_text": ANY(__UpperCamelCase )}], [{"generated_text": ANY(__UpperCamelCase )}, {"generated_text": ANY(__UpperCamelCase )}], ] , ) if text_generator.tokenizer.pad_token is not None: A = text_generator( ["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCamelCase ) 
self.assertEqual( __UpperCamelCase , [ [{"generated_text": ANY(__UpperCamelCase )}, {"generated_text": ANY(__UpperCamelCase )}], [{"generated_text": ANY(__UpperCamelCase )}, {"generated_text": ANY(__UpperCamelCase )}], ] , ) with self.assertRaises(__UpperCamelCase ): A = text_generator("test" , return_full_text=__UpperCamelCase , return_text=__UpperCamelCase ) with self.assertRaises(__UpperCamelCase ): A = text_generator("test" , return_full_text=__UpperCamelCase , return_tensors=__UpperCamelCase ) with self.assertRaises(__UpperCamelCase ): A = text_generator("test" , return_text=__UpperCamelCase , return_tensors=__UpperCamelCase ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): A = text_generator("" ) self.assertEqual(__UpperCamelCase , [{"generated_text": ANY(__UpperCamelCase )}] ) else: with self.assertRaises((ValueError, AssertionError) ): A = text_generator("" ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. 
A = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"] if ( tokenizer.model_max_length < 1_00_00 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator("This is a test" * 5_00 , max_new_tokens=20 ) A = text_generator("This is a test" * 5_00 , handle_long_generation="hole" , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(__UpperCamelCase ): text_generator( "This is a test" * 5_00 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def lowerCamelCase ( self :Tuple ): import torch # Classic `model_kwargs` A = pipeline( model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) A = pipe("This is a test" ) self.assertEqual( __UpperCamelCase , [ { "generated_text": ( "This is a test test test test test test test test test test test test test test test test" " test" ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) 
A = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) A = pipe("This is a test" ) self.assertEqual( __UpperCamelCase , [ { "generated_text": ( "This is a test test test test test test test test test test test test test test test test" " test" ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 A = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) A = pipe("This is a test" ) self.assertEqual( __UpperCamelCase , [ { "generated_text": ( "This is a test test test test test test test test test test test test test test test test" " test" ) } ] , ) @require_torch @require_torch_gpu def lowerCamelCase ( self :Dict ): import torch A = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.floataa ) pipe("This is a test" ) @require_torch @require_accelerate @require_torch_gpu def lowerCamelCase ( self :Dict ): import torch A = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.floataa ) pipe("This is a test" , do_sample=__UpperCamelCase , top_p=0.5 ) def lowerCamelCase ( self :Tuple ): A = "Hello world" A = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" ) if text_generator.model.framework == "tf": A = logging.get_logger("transformers.generation.tf_utils" ) else: A = logging.get_logger("transformers.generation.utils" ) A = "Both `max_new_tokens`" # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(__UpperCamelCase ) as cl: A = text_generator(__UpperCamelCase , max_length=10 , max_new_tokens=1 ) 
self.assertIn(__UpperCamelCase , cl.out ) # The user only sets one -> no warning with CaptureLogger(__UpperCamelCase ) as cl: A = text_generator(__UpperCamelCase , max_new_tokens=1 ) self.assertNotIn(__UpperCamelCase , cl.out ) with CaptureLogger(__UpperCamelCase ) as cl: A = text_generator(__UpperCamelCase , max_length=10 ) self.assertNotIn(__UpperCamelCase , cl.out )
292
"""simple docstring""" from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _UpperCAmelCase : UpperCamelCase = PegasusConfig UpperCamelCase = {} UpperCamelCase = '''gelu''' def __init__( self :Union[str, Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :str=13 , __UpperCamelCase :List[Any]=7 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :List[Any]=False , __UpperCamelCase :Any=99 , __UpperCamelCase :Tuple=32 , __UpperCamelCase :Optional[int]=2 , __UpperCamelCase :Optional[Any]=4 , __UpperCamelCase :Tuple=37 , __UpperCamelCase :Optional[Any]=0.1 , __UpperCamelCase :Tuple=0.1 , __UpperCamelCase :Optional[int]=40 , __UpperCamelCase :Tuple=2 , __UpperCamelCase :Dict=1 , __UpperCamelCase :Any=0 , ): A = parent A = batch_size A = seq_length A = is_training A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = eos_token_id A = pad_token_id A = bos_token_id def lowerCamelCase ( self :Tuple ): A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A = tf.concat([input_ids, eos_tensor] , axis=1 ) A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , 
encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) A = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) return config, inputs_dict def lowerCamelCase ( self :str , __UpperCamelCase :str , __UpperCamelCase :Union[str, Any] ): A = TFPegasusModel(config=__UpperCamelCase ).get_decoder() A = inputs_dict["input_ids"] A = input_ids[:1, :] A = inputs_dict["attention_mask"][:1, :] A = inputs_dict["head_mask"] A = 1 # first forward pass A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , head_mask=__UpperCamelCase , use_cache=__UpperCamelCase ) A, A = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A = ids_tensor((self.batch_size, 3) , config.vocab_size ) A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A = tf.concat([input_ids, next_tokens] , axis=-1 ) A = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0] A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A = output_from_no_past[:, -3:, random_slice_idx] A = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__UpperCamelCase , __UpperCamelCase , rtol=1e-3 ) def A__ ( 
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , ): if attention_mask is None: A = tf.cast(tf.math.not_equal(UpperCamelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: A = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: A = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): UpperCamelCase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () UpperCamelCase = (TFPegasusForConditionalGeneration,) if is_tf_available() else () UpperCamelCase = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) UpperCamelCase = True UpperCamelCase = False UpperCamelCase = False def lowerCamelCase ( self :int ): A = TFPegasusModelTester(self ) A = ConfigTester(self , config_class=__UpperCamelCase ) def lowerCamelCase ( self :Dict ): self.config_tester.run_common_tests() def lowerCamelCase ( self :Any ): A = self.model_tester.prepare_config_and_inputs_for_common() 
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCamelCase ) @require_sentencepiece @require_tokenizers @require_tf class _UpperCAmelCase ( unittest.TestCase ): UpperCamelCase = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for 
their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] UpperCamelCase = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers UpperCamelCase = '''google/pegasus-xsum''' @cached_property def lowerCamelCase ( self :Any ): return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCamelCase ( self :Dict ): A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCamelCase ( self :str , **__UpperCamelCase :str ): A = self.translate_src_text(**__UpperCamelCase ) assert self.expected_text == generated_words def lowerCamelCase ( self :Any , **__UpperCamelCase :List[str] ): A = self.tokenizer(self.src_text , **__UpperCamelCase , padding=__UpperCamelCase , return_tensors="tf" ) A = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCamelCase , ) A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCamelCase ) return generated_words @slow def lowerCamelCase ( self :Union[str, Any] ): self._assert_generated_batch_equal_expected()
292
1
"""simple docstring""" import argparse import copy def A__ ( UpperCamelCase ): A = {} with open(UpperCamelCase ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: A = [] _list.append([line.split()[1], line.split()[2]] ) A = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: A = [] _list.append([line.split()[0], line.split()[2]] ) A = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def A__ ( UpperCamelCase , UpperCamelCase ): with open(UpperCamelCase ) as f: A = f.read(1 ) A = start_node A = [] A = start_node A = 0 while visiting not in first_solution: A = 10_000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(UpperCamelCase ) and k[0] not in first_solution: A = k[1] A = k[0] first_solution.append(UpperCamelCase ) A = distance_of_first_solution + int(UpperCamelCase ) A = best_node first_solution.append(UpperCamelCase ) A = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 A = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 10_000 ) return first_solution, distance_of_first_solution def A__ ( UpperCamelCase , UpperCamelCase ): A = [] for n in solution[1:-1]: A = solution.index(UpperCamelCase ) for kn in solution[1:-1]: A = solution.index(UpperCamelCase ) if n == kn: continue A = copy.deepcopy(UpperCamelCase ) A = kn A = n A = 0 for k in _tmp[:-1]: A = _tmp[_tmp.index(UpperCamelCase ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: A = distance + int(i[1] ) _tmp.append(UpperCamelCase ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) A = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda UpperCamelCase : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , 
UpperCamelCase , UpperCamelCase ): A = 1 A = first_solution A = [] A = distance_of_first_solution A = solution while count <= iters: A = find_neighborhood(UpperCamelCase , UpperCamelCase ) A = 0 A = neighborhood[index_of_best_solution] A = len(UpperCamelCase ) - 1 A = False while not found: A = 0 while i < len(UpperCamelCase ): if best_solution[i] != solution[i]: A = best_solution[i] A = solution[i] break A = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) A = True A = best_solution[:-1] A = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: A = cost A = solution else: A = index_of_best_solution + 1 A = neighborhood[index_of_best_solution] if len(UpperCamelCase ) >= size: tabu_list.pop(0 ) A = count + 1 return best_solution_ever, best_cost def A__ ( UpperCamelCase=None ): A = generate_neighbours(args.File ) A, A = generate_first_solution( args.File , UpperCamelCase ) A, A = tabu_search( UpperCamelCase , UpperCamelCase , UpperCamelCase , args.Iterations , args.Size , ) print(F"Best solution: {best_sol}, with total distance: {best_cost}." ) if __name__ == "__main__": _snake_case : Optional[Any] = argparse.ArgumentParser(description='Tabu Search') parser.add_argument( '-f', '--File', type=str, help='Path to the file containing the data', required=True, ) parser.add_argument( '-i', '--Iterations', type=int, help='How many iterations the algorithm should perform', required=True, ) parser.add_argument( '-s', '--Size', type=int, help='Size of the tabu list', required=True ) # Pass the arguments to main method main(parser.parse_args())
292
"""simple docstring""" from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def A__ ( UpperCamelCase = "laptop" ): A = F"https://www.amazon.in/laptop/s?k={product}" A = { "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36", "Accept-Language": "en-US, en;q=0.5", } A = BeautifulSoup(requests.get(UpperCamelCase , headers=UpperCamelCase ).text ) # Initialize a Pandas dataframe with the column titles A = DataFrame( columns=[ "Product Title", "Product Link", "Current Price of the product", "Product Rating", "MRP of the product", "Discount", ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( "div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ): try: A = item.ha.text A = "https://www.amazon.in/" + item.ha.a["href"] A = item.find("span" , attrs={"class": "a-offscreen"} ).text try: A = item.find("span" , attrs={"class": "a-icon-alt"} ).text except AttributeError: A = "Not available" try: A = ( "₹" + item.find( "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1] ) except AttributeError: A = "" try: A = float( ( ( float(product_mrp.strip("₹" ).replace("," , "" ) ) - float(product_price.strip("₹" ).replace("," , "" ) ) ) / float(product_mrp.strip("₹" ).replace("," , "" ) ) ) * 100 ) except ValueError: A = float("nan" ) except AttributeError: pass A = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] A = " " A = " " data_frame.index += 1 return data_frame if __name__ == "__main__": _snake_case : Optional[int] = 'headphones' get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
292
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _snake_case : Union[str, Any] = { 'configuration_encodec': [ 'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EncodecConfig', ], 'feature_extraction_encodec': ['EncodecFeatureExtractor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : int = [ 'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST', 'EncodecModel', 'EncodecPreTrainedModel', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys _snake_case : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
292
"""simple docstring""" import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging _snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name class _UpperCAmelCase ( lowercase_ ): def __init__( self :Dict , __UpperCamelCase :WhisperForConditionalGeneration , __UpperCamelCase :WhisperProcessor , __UpperCamelCase :AutoencoderKL , __UpperCamelCase :CLIPTextModel , __UpperCamelCase :CLIPTokenizer , __UpperCamelCase :UNetaDConditionModel , __UpperCamelCase :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __UpperCamelCase :StableDiffusionSafetyChecker , __UpperCamelCase :CLIPImageProcessor , ): super().__init__() if safety_checker is None: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
) self.register_modules( speech_model=__UpperCamelCase , speech_processor=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , feature_extractor=__UpperCamelCase , ) def lowerCamelCase ( self :Any , __UpperCamelCase :Optional[Union[str, int]] = "auto" ): if slice_size == "auto": A = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__UpperCamelCase ) def lowerCamelCase ( self :Tuple ): self.enable_attention_slicing(__UpperCamelCase ) @torch.no_grad() def __call__( self :Optional[Any] , __UpperCamelCase :Any , __UpperCamelCase :Dict=1_60_00 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 50 , __UpperCamelCase :float = 7.5 , __UpperCamelCase :Optional[Union[str, List[str]]] = None , __UpperCamelCase :Optional[int] = 1 , __UpperCamelCase :float = 0.0 , __UpperCamelCase :Optional[torch.Generator] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , __UpperCamelCase :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase :int = 1 , **__UpperCamelCase :Dict , ): A = self.speech_processor.feature_extractor( __UpperCamelCase , return_tensors="pt" , sampling_rate=__UpperCamelCase ).input_features.to(self.device ) A = self.speech_model.generate(__UpperCamelCase , max_length=48_00_00 ) A = self.speech_processor.tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase , normalize=__UpperCamelCase )[ 0 ] if isinstance(__UpperCamelCase , __UpperCamelCase ): A = 1 elif isinstance(__UpperCamelCase , __UpperCamelCase ): A = len(__UpperCamelCase ) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." 
) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(__UpperCamelCase )}." ) # get prompt text embeddings A = self.tokenizer( __UpperCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , ) A = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) A = text_input_ids[:, : self.tokenizer.model_max_length] A = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method A, A, A = text_embeddings.shape A = text_embeddings.repeat(1 , __UpperCamelCase , 1 ) A = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. A = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: A = 42 if negative_prompt is None: A = [""] * batch_size elif type(__UpperCamelCase ) is not type(__UpperCamelCase ): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !=" f" {type(__UpperCamelCase )}." 
) elif isinstance(__UpperCamelCase , __UpperCamelCase ): A = [negative_prompt] elif batch_size != len(__UpperCamelCase ): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: A = negative_prompt A = text_input_ids.shape[-1] A = self.tokenizer( __UpperCamelCase , padding="max_length" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="pt" , ) A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method A = uncond_embeddings.shape[1] A = uncond_embeddings.repeat(1 , __UpperCamelCase , 1 ) A = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes A = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
A = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) A = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="cpu" , dtype=__UpperCamelCase ).to( self.device ) else: A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase ) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" ) A = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(__UpperCamelCase ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand A = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler A = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] A = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) A = {} if accepts_eta: A = eta for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents A = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase ) # predict the noise residual A = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample # perform guidance if do_classifier_free_guidance: A, A = noise_pred.chunk(2 ) A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 A = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) A = 1 / 0.18_215 * latents A = self.vae.decode(__UpperCamelCase ).sample A = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": A = self.numpy_to_pil(__UpperCamelCase ) if not return_dict: return image return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
292
1
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class _UpperCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self :Tuple ): A = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] ) A = get_activation("gelu" ) self.assertTrue(torch.allclose(gelu_python(__UpperCamelCase ) , torch_builtin(__UpperCamelCase ) ) ) self.assertFalse(torch.allclose(gelu_python(__UpperCamelCase ) , gelu_new(__UpperCamelCase ) ) ) def lowerCamelCase ( self :Union[str, Any] ): A = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] ) A = get_activation("gelu" ) A = get_activation("gelu_10" ) A = torch_builtin(__UpperCamelCase ) A = geluaa(__UpperCamelCase ) A = torch.where(y_gelu_aa < 10.0 , 1 , 0 ) self.assertTrue(torch.max(__UpperCamelCase ).item() == 10.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def lowerCamelCase ( self :Dict ): get_activation("gelu" ) get_activation("gelu_10" ) get_activation("gelu_fast" ) get_activation("gelu_new" ) get_activation("gelu_python" ) get_activation("gelu_pytorch_tanh" ) get_activation("linear" ) get_activation("mish" ) get_activation("quick_gelu" ) get_activation("relu" ) get_activation("sigmoid" ) get_activation("silu" ) get_activation("swish" ) get_activation("tanh" ) with self.assertRaises(__UpperCamelCase ): get_activation("bogus" ) with self.assertRaises(__UpperCamelCase ): get_activation(__UpperCamelCase ) def lowerCamelCase ( self :str ): A = get_activation("gelu" ) A = 1 A = get_activation("gelu" ) self.assertEqual(acta.a , 1 ) with self.assertRaises(__UpperCamelCase ): A = acta.a
292
"""simple docstring""" _snake_case : Optional[int] = [ 'DownloadConfig', 'DownloadManager', 'DownloadMode', 'StreamingDownloadManager', ] from .download_config import DownloadConfig from .download_manager import DownloadManager, DownloadMode from .streaming_download_manager import StreamingDownloadManager
292
1
"""simple docstring""" import math _snake_case : Any = 10 _snake_case : int = 7 _snake_case : List[Any] = BALLS_PER_COLOUR * NUM_COLOURS def A__ ( UpperCamelCase = 20 ): A = math.comb(UpperCamelCase , UpperCamelCase ) A = math.comb(NUM_BALLS - BALLS_PER_COLOUR , UpperCamelCase ) A = NUM_COLOURS * (1 - missing_colour / total) return F"{result:.9f}" if __name__ == "__main__": print(solution(20))
292
"""simple docstring""" import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def A__ ( UpperCamelCase ): A = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(UpperCamelCase , UpperCamelCase ) def A__ ( UpperCamelCase ): A = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: A = s_dict.pop(UpperCamelCase ) elif "subsample" in key: A = s_dict.pop(UpperCamelCase ) def A__ ( UpperCamelCase ): A, A = emb.weight.shape A = nn.Linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase ) A = emb.weight.data return lin_layer def A__ ( UpperCamelCase , UpperCamelCase ): A = torch.load(UpperCamelCase , map_location="cpu" ) A = mam_aaa["args"] A = mam_aaa["model"] A = state_dict["decoder.output_projection.weight"] remove_ignore_keys_(UpperCamelCase ) rename_keys(UpperCamelCase ) A = state_dict["decoder.embed_tokens.weight"].shape[0] A = args.share_decoder_input_output_embed A = [int(UpperCamelCase ) for i in args.conv_kernel_sizes.split("," )] A = SpeechaTextConfig( vocab_size=UpperCamelCase , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(UpperCamelCase ) , conv_channels=args.conv_channels , conv_kernel_sizes=UpperCamelCase , 
input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=UpperCamelCase , num_beams=5 , max_length=200 , use_cache=UpperCamelCase , decoder_start_token_id=2 , early_stopping=UpperCamelCase , ) A = SpeechaTextForConditionalGeneration(UpperCamelCase ) A, A = model.model.load_state_dict(UpperCamelCase , strict=UpperCamelCase ) if len(UpperCamelCase ) > 0 and not set(UpperCamelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing," F" but all the following weights are missing {missing}" ) if tie_embeds: A = make_linear_from_emb(model.model.decoder.embed_tokens ) else: A = lm_head_weights model.save_pretrained(UpperCamelCase ) if __name__ == "__main__": _snake_case : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') _snake_case : str = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
292
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case : Optional[int] = logging.get_logger(__name__) _snake_case : Optional[int] = { 'google/vivit-b-16x2-kinetics400': ( 'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json' ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = '''vivit''' def __init__( self :Optional[Any] , __UpperCamelCase :Dict=2_24 , __UpperCamelCase :int=32 , __UpperCamelCase :Union[str, Any]=[2, 16, 16] , __UpperCamelCase :Optional[Any]=3 , __UpperCamelCase :Optional[Any]=7_68 , __UpperCamelCase :Any=12 , __UpperCamelCase :List[str]=12 , __UpperCamelCase :List[str]=30_72 , __UpperCamelCase :Any="gelu_fast" , __UpperCamelCase :List[Any]=0.0 , __UpperCamelCase :str=0.0 , __UpperCamelCase :Dict=0.02 , __UpperCamelCase :Optional[Any]=1e-06 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ): A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = num_frames A = tubelet_size A = num_channels A = qkv_bias super().__init__(**__UpperCamelCase )
292
"""simple docstring""" from math import isqrt, loga def A__ ( UpperCamelCase ): A = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , UpperCamelCase , UpperCamelCase ): A = False return [i for i in range(2 , UpperCamelCase ) if is_prime[i]] def A__ ( UpperCamelCase = 800_800 , UpperCamelCase = 800_800 ): A = degree * loga(UpperCamelCase ) A = int(UpperCamelCase ) A = calculate_prime_numbers(UpperCamelCase ) A = 0 A = 0 A = len(UpperCamelCase ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(F"""{solution() = }""")
292
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case : Union[str, Any] = logging.get_logger(__name__) _snake_case : Optional[Any] = { 'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json', # See all Donut models at https://huggingface.co/models?filter=donut-swin } class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = '''donut-swin''' UpperCamelCase = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self :Optional[Any] , __UpperCamelCase :int=2_24 , __UpperCamelCase :List[str]=4 , __UpperCamelCase :Optional[int]=3 , __UpperCamelCase :Dict=96 , __UpperCamelCase :List[Any]=[2, 2, 6, 2] , __UpperCamelCase :Dict=[3, 6, 12, 24] , __UpperCamelCase :Dict=7 , __UpperCamelCase :List[str]=4.0 , __UpperCamelCase :Any=True , __UpperCamelCase :str=0.0 , __UpperCamelCase :List[str]=0.0 , __UpperCamelCase :int=0.1 , __UpperCamelCase :int="gelu" , __UpperCamelCase :Optional[int]=False , __UpperCamelCase :List[Any]=0.02 , __UpperCamelCase :str=1e-5 , **__UpperCamelCase :Any , ): super().__init__(**__UpperCamelCase ) A = image_size A = patch_size A = num_channels A = embed_dim A = depths A = len(__UpperCamelCase ) A = num_heads A = window_size A = mlp_ratio A = qkv_bias A = hidden_dropout_prob A = attention_probs_dropout_prob A = drop_path_rate A = hidden_act A = use_absolute_embeddings A = layer_norm_eps A = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model A = int(embed_dim * 2 ** (len(__UpperCamelCase ) - 1) )
292
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _snake_case : Union[str, Any] = { 'configuration_encodec': [ 'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EncodecConfig', ], 'feature_extraction_encodec': ['EncodecFeatureExtractor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : int = [ 'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST', 'EncodecModel', 'EncodecPreTrainedModel', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys _snake_case : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
292
1
"""simple docstring""" import numpy as np import datasets _snake_case : int = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n' _snake_case : Dict = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n' _snake_case : Optional[Any] = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCAmelCase ( datasets.Metric ): def lowerCamelCase ( self :str ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ), } ) , ) def lowerCamelCase ( self :str , __UpperCamelCase :Tuple , __UpperCamelCase :str ): # convert to numpy arrays A = np.array(__UpperCamelCase ) A = np.array(__UpperCamelCase ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError("Expected 
`X` to be a 2D vector" ) if len(reference_distribution.shape ) != 2: raise ValueError("Expected `reference_distribution` to be a 2D vector" ) if reference_distribution.shape[0] < 2: raise ValueError( "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" ) # Get mahalanobis distance for each prediction A = X - np.mean(__UpperCamelCase ) A = np.cov(reference_distribution.T ) try: A = np.linalg.inv(__UpperCamelCase ) except np.linalg.LinAlgError: A = np.linalg.pinv(__UpperCamelCase ) A = np.dot(__UpperCamelCase , __UpperCamelCase ) A = np.dot(__UpperCamelCase , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
292
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging _snake_case : List[Any] = logging.get_logger(__name__) _snake_case : int = { 'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json', # See all Marian models at https://huggingface.co/models?filter=marian } class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = '''marian''' UpperCamelCase = ['''past_key_values'''] UpperCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self :int , __UpperCamelCase :Any=5_81_01 , __UpperCamelCase :int=None , __UpperCamelCase :Union[str, Any]=10_24 , __UpperCamelCase :Union[str, Any]=12 , __UpperCamelCase :str=40_96 , __UpperCamelCase :int=16 , __UpperCamelCase :int=12 , __UpperCamelCase :Optional[Any]=40_96 , __UpperCamelCase :Optional[Any]=16 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :str=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :Any="gelu" , __UpperCamelCase :Any=10_24 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Optional[Any]=0.0 , __UpperCamelCase :Union[str, Any]=0.0 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :List[str]=5_81_00 , __UpperCamelCase :str=False , __UpperCamelCase :Optional[int]=5_81_00 , __UpperCamelCase :List[Any]=0 , __UpperCamelCase :List[str]=0 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ): A = vocab_size A = decoder_vocab_size or vocab_size A = max_position_embeddings A = d_model A = encoder_ffn_dim A = encoder_layers A = encoder_attention_heads A = decoder_ffn_dim A = decoder_layers A = decoder_attention_heads A = dropout A = 
attention_dropout A = activation_dropout A = activation_function A = init_std A = encoder_layerdrop A = decoder_layerdrop A = use_cache A = encoder_layers A = scale_embedding # scale factor will be sqrt(d_model) if True A = share_encoder_decoder_embeddings super().__init__( pad_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , forced_eos_token_id=__UpperCamelCase , **__UpperCamelCase , ) class _UpperCAmelCase ( lowercase_ ): @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def lowerCamelCase ( self :List[str] ): if self.task in ["default", "seq2seq-lm"]: A = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: A = {0: "batch"} A = {0: "batch", 1: "past_decoder_sequence + sequence"} else: A = {0: "batch", 1: "decoder_sequence"} A = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(__UpperCamelCase , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
A = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: A, A = self.num_layers for i in range(__UpperCamelCase ): A = {0: "batch", 2: "past_sequence + sequence"} A = {0: "batch", 2: "past_sequence + sequence"} else: A = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def lowerCamelCase ( self :List[str] ): if self.task in ["default", "seq2seq-lm"]: A = super().outputs else: A = super(__UpperCamelCase , self ).outputs if self.use_past: A, A = self.num_layers for i in range(__UpperCamelCase ): A = {0: "batch", 2: "past_sequence + sequence"} A = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ): A = self._generate_dummy_inputs_for_encoder_and_decoder( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # Generate decoder inputs A = seq_length if not self.use_past else 1 A = self._generate_dummy_inputs_for_encoder_and_decoder( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) A = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} A = dict(**__UpperCamelCase , **__UpperCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch A, A = common_inputs["input_ids"].shape A = common_inputs["decoder_input_ids"].shape[1] A, A = self.num_attention_heads A = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) A = decoder_seq_length + 3 A = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) A = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase )] , dim=1 ) A = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered A, A = self.num_layers A = min(__UpperCamelCase , __UpperCamelCase ) A = max(__UpperCamelCase , __UpperCamelCase ) - min_num_layers A = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(__UpperCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase ), ) ) # TODO: test this. A = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(__UpperCamelCase , __UpperCamelCase ): common_inputs["past_key_values"].append((torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) ) return common_inputs def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ): A = self._generate_dummy_inputs_for_encoder_and_decoder( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch A, A = common_inputs["input_ids"].shape # Not using the same length for past_key_values A = seqlen + 2 A, A = self.num_layers A, A = self.num_attention_heads A = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) A = common_inputs["attention_mask"].dtype A = torch.cat( [common_inputs["attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase , dtype=__UpperCamelCase )] , dim=1 ) A = [ (torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) for _ in range(__UpperCamelCase ) ] return common_inputs def lowerCamelCase ( self :Tuple , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX A = compute_effective_axis_dimension( __UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX A = tokenizer.num_special_tokens_to_add(__UpperCamelCase ) A = compute_effective_axis_dimension( __UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCamelCase ) # Generate dummy inputs according to compute batch and sequence A = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size A = dict(tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase ) ) return common_inputs def lowerCamelCase ( self :List[Any] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ): if self.task in ["default", "seq2seq-lm"]: A = 
self._generate_dummy_inputs_for_default_and_seqaseq_lm( __UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase ) else: A = self._generate_dummy_inputs_for_causal_lm( __UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase ) return common_inputs def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str] , __UpperCamelCase :str , __UpperCamelCase :str ): if self.task in ["default", "seq2seq-lm"]: A = super()._flatten_past_key_values_(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) else: A = super(__UpperCamelCase , self )._flatten_past_key_values_( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) @property def lowerCamelCase ( self :List[str] ): return 1e-4
292
1
"""simple docstring""" import math class _UpperCAmelCase : def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :list[list[float]] , __UpperCamelCase :list[int] ): A = 0.0 A = 0.0 for i in range(len(__UpperCamelCase ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def lowerCamelCase ( self :int , __UpperCamelCase :list[list[int | float]] , __UpperCamelCase :list[int] , __UpperCamelCase :int , __UpperCamelCase :float ): for i in range(len(__UpperCamelCase ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def A__ ( ): # Training Examples ( m, n ) A = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) A = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training A = SelfOrganizingMap() A = 3 A = 0.5 for _ in range(UpperCamelCase ): for j in range(len(UpperCamelCase ) ): # training sample A = training_samples[j] # Compute the winning vector A = self_organizing_map.get_winner(UpperCamelCase , UpperCamelCase ) # Update the winning vector A = self_organizing_map.update(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) # classify test sample A = [0, 0, 0, 1] A = self_organizing_map.get_winner(UpperCamelCase , UpperCamelCase ) # results print(F"Clusters that the test sample belongs to : {winner}" ) print(F"Weights that have been trained : {weights}" ) # running the main() function if __name__ == "__main__": main()
292
"""simple docstring""" # A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. def A__ ( UpperCamelCase ): A = [False] * len(UpperCamelCase ) A = [-1] * len(UpperCamelCase ) def dfs(UpperCamelCase , UpperCamelCase ): A = True A = c for u in graph[v]: if not visited[u]: dfs(UpperCamelCase , 1 - c ) for i in range(len(UpperCamelCase ) ): if not visited[i]: dfs(UpperCamelCase , 0 ) for i in range(len(UpperCamelCase ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph _snake_case : str = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
292
1
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _UpperCAmelCase ( lowercase_ ): def __init__( self :int , __UpperCamelCase :Dict , __UpperCamelCase :Tuple ): super().__init__() # make sure scheduler can always be converted to DDIM A = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=__UpperCamelCase , scheduler=__UpperCamelCase ) @torch.no_grad() def __call__( self :Tuple , __UpperCamelCase :int = 1 , __UpperCamelCase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCamelCase :float = 0.0 , __UpperCamelCase :int = 50 , __UpperCamelCase :Optional[bool] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , ): # Sample gaussian noise to begin loop if isinstance(self.unet.config.sample_size , __UpperCamelCase ): A = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: A = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(__UpperCamelCase , __UpperCamelCase ) and len(__UpperCamelCase ) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(__UpperCamelCase )}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) A = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(__UpperCamelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output A = self.unet(__UpperCamelCase , __UpperCamelCase ).sample # 2. 
predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 A = self.scheduler.step( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , eta=__UpperCamelCase , use_clipped_model_output=__UpperCamelCase , generator=__UpperCamelCase ).prev_sample A = (image / 2 + 0.5).clamp(0 , 1 ) A = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": A = self.numpy_to_pil(__UpperCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__UpperCamelCase )
292
"""simple docstring""" from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class _UpperCAmelCase ( lowercase_ ): def __init__( self :int , __UpperCamelCase :Distribution , __UpperCamelCase :Dict=None , __UpperCamelCase :Optional[int]=None , __UpperCamelCase :List[str]=0 ): A = 1.0 if scale is None else scale A = 0.0 if loc is None else loc super().__init__(__UpperCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__UpperCamelCase )] ) @property def lowerCamelCase ( self :Any ): return self.base_dist.mean * self.scale + self.loc @property def lowerCamelCase ( self :Optional[int] ): return self.base_dist.variance * self.scale**2 @property def lowerCamelCase ( self :Dict ): return self.variance.sqrt() class _UpperCAmelCase ( nn.Module ): def __init__( self :Dict , __UpperCamelCase :int , __UpperCamelCase :Dict[str, int] , __UpperCamelCase :Callable[..., Tuple[torch.Tensor]] , **__UpperCamelCase :str ): super().__init__(**__UpperCamelCase ) A = args_dim A = nn.ModuleList([nn.Linear(__UpperCamelCase , __UpperCamelCase ) for dim in args_dim.values()] ) A = domain_map def lowerCamelCase ( self :int , __UpperCamelCase :torch.Tensor ): A = [proj(__UpperCamelCase ) for proj in self.proj] return self.domain_map(*__UpperCamelCase ) class _UpperCAmelCase ( nn.Module ): def __init__( self :Dict , __UpperCamelCase :int ): super().__init__() A = function def lowerCamelCase ( self :List[str] , __UpperCamelCase :Any , *__UpperCamelCase :Any ): return self.function(__UpperCamelCase , *__UpperCamelCase ) class _UpperCAmelCase : UpperCamelCase = 42 UpperCamelCase = 42 UpperCamelCase = 42 def __init__( self :Any , __UpperCamelCase :int = 1 ): A = dim A = {k: dim * self.args_dim[k] for k in self.args_dim} def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Dict ): if self.dim == 1: return 
self.distribution_class(*__UpperCamelCase ) else: return Independent(self.distribution_class(*__UpperCamelCase ) , 1 ) def lowerCamelCase ( self :int , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None , ): A = self._base_distribution(__UpperCamelCase ) if loc is None and scale is None: return distr else: return AffineTransformed(__UpperCamelCase , loc=__UpperCamelCase , scale=__UpperCamelCase , event_dim=self.event_dim ) @property def lowerCamelCase ( self :List[Any] ): return () if self.dim == 1 else (self.dim,) @property def lowerCamelCase ( self :Tuple ): return len(self.event_shape ) @property def lowerCamelCase ( self :int ): return 0.0 def lowerCamelCase ( self :str , __UpperCamelCase :int ): return ParameterProjection( in_features=__UpperCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def lowerCamelCase ( self :List[Any] , *__UpperCamelCase :torch.Tensor ): raise NotImplementedError() @staticmethod def lowerCamelCase ( __UpperCamelCase :torch.Tensor ): return (x + torch.sqrt(torch.square(__UpperCamelCase ) + 4.0 )) / 2.0 class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = {"df": 1, "loc": 1, "scale": 1} UpperCamelCase = StudentT @classmethod def lowerCamelCase ( cls :List[str] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ): A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) A = 2.0 + cls.squareplus(__UpperCamelCase ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = {"loc": 1, "scale": 1} UpperCamelCase = Normal @classmethod def lowerCamelCase ( cls :List[Any] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ): A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class _UpperCAmelCase ( lowercase_ ): 
UpperCamelCase = {"total_count": 1, "logits": 1} UpperCamelCase = NegativeBinomial @classmethod def lowerCamelCase ( cls :str , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ): A = cls.squareplus(__UpperCamelCase ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def lowerCamelCase ( self :Tuple , __UpperCamelCase :List[str] ): A, A = distr_args if self.dim == 1: return self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase ) else: return Independent(self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase ) , 1 ) def lowerCamelCase ( self :List[str] , __UpperCamelCase :str , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None ): A, A = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
292
1
"""simple docstring""" from sklearn.metrics import recall_score import datasets _snake_case : str = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n' _snake_case : Union[str, Any] = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. 
Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> 
recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n' _snake_case : Optional[Any] = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. 
and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCAmelCase ( datasets.Metric ): def lowerCamelCase ( self :Union[str, Any] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("int32" ) ), "references": datasets.Sequence(datasets.Value("int32" ) ), } if self.config_name == "multilabel" else { "predictions": datasets.Value("int32" ), "references": datasets.Value("int32" ), } ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , ) def lowerCamelCase ( self :Tuple , __UpperCamelCase :Optional[Any] , __UpperCamelCase :List[Any] , __UpperCamelCase :List[Any]=None , __UpperCamelCase :Dict=1 , __UpperCamelCase :List[Any]="binary" , __UpperCamelCase :Tuple=None , __UpperCamelCase :Any="warn" , ): A = recall_score( __UpperCamelCase , __UpperCamelCase , labels=__UpperCamelCase , pos_label=__UpperCamelCase , average=__UpperCamelCase , sample_weight=__UpperCamelCase , zero_division=__UpperCamelCase , ) return {"recall": float(__UpperCamelCase ) if score.size == 1 else score}
292
"""simple docstring""" import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class _UpperCAmelCase : UpperCamelCase = None def lowerCamelCase ( self :List[Any] ): A = self.feature_extraction_class(**self.feat_extract_dict ) A = json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , __UpperCamelCase ) def lowerCamelCase ( self :Dict ): A = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A = os.path.join(__UpperCamelCase , "feat_extract.json" ) feat_extract_first.to_json_file(__UpperCamelCase ) A = self.feature_extraction_class.from_json_file(__UpperCamelCase ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def lowerCamelCase ( self :Dict ): A = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A = feat_extract_first.save_pretrained(__UpperCamelCase )[0] check_json_file_has_correct_format(__UpperCamelCase ) A = self.feature_extraction_class.from_pretrained(__UpperCamelCase ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def lowerCamelCase ( self :Tuple ): A = self.feature_extraction_class() self.assertIsNotNone(__UpperCamelCase )
292
1
"""simple docstring""" def A__ ( UpperCamelCase , UpperCamelCase ): A = [0 for i in range(r + 1 )] # nc0 = 1 A = 1 for i in range(1 , n + 1 ): # to compute current row from previous row. A = min(UpperCamelCase , UpperCamelCase ) while j > 0: c[j] += c[j - 1] j -= 1 return c[r] print(binomial_coefficient(n=10, r=5))
292
"""simple docstring""" import unittest from transformers import RoFormerTokenizer, RoFormerTokenizerFast from transformers.testing_utils import require_rjieba, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_rjieba @require_tokenizers class _UpperCAmelCase ( lowercase_ , unittest.TestCase ): UpperCamelCase = RoFormerTokenizer UpperCamelCase = RoFormerTokenizerFast UpperCamelCase = True UpperCamelCase = True def lowerCamelCase ( self :List[str] ): super().setUp() def lowerCamelCase ( self :int , **__UpperCamelCase :List[Any] ): return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCamelCase ) def lowerCamelCase ( self :Tuple , **__UpperCamelCase :Optional[int] ): return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCamelCase ) def lowerCamelCase ( self :Any ): A = "永和服装饰品有限公司,今天天气非常好" A = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好" return input_text, output_text def lowerCamelCase ( self :int ): A = self.get_tokenizer() A, A = self.get_chinese_input_output_texts() A = tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , output_text.split() ) A = tokens + [tokenizer.unk_token] A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase ) def lowerCamelCase ( self :str ): A = self.get_rust_tokenizer() A, A = self.get_chinese_input_output_texts() A = tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , output_text.split() ) A = tokens + [tokenizer.unk_token] A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase ) def lowerCamelCase ( self :Any ): pass def lowerCamelCase ( self :Tuple ): pass def lowerCamelCase ( self :List[str] ): pass
292
1
"""simple docstring""" from __future__ import annotations _snake_case : str = [] def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): for i in range(len(UpperCamelCase ) ): if board[row][i] == 1: return False for i in range(len(UpperCamelCase ) ): if board[i][column] == 1: return False for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , -1 , -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , len(UpperCamelCase ) ) ): if board[i][j] == 1: return False return True def A__ ( UpperCamelCase , UpperCamelCase ): if row >= len(UpperCamelCase ): solution.append(UpperCamelCase ) printboard(UpperCamelCase ) print() return True for i in range(len(UpperCamelCase ) ): if is_safe(UpperCamelCase , UpperCamelCase , UpperCamelCase ): A = 1 solve(UpperCamelCase , row + 1 ) A = 0 return False def A__ ( UpperCamelCase ): for i in range(len(UpperCamelCase ) ): for j in range(len(UpperCamelCase ) ): if board[i][j] == 1: print("Q" , end=" " ) else: print("." , end=" " ) print() # n=int(input("The no. of queens")) _snake_case : List[str] = 8 _snake_case : List[str] = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print('The total no. of solutions are :', len(solution))
292
"""simple docstring""" def A__ ( UpperCamelCase , UpperCamelCase = False ): if not isinstance(UpperCamelCase , UpperCamelCase ): A = F"Expected string as input, found {type(UpperCamelCase )}" raise ValueError(UpperCamelCase ) if not isinstance(UpperCamelCase , UpperCamelCase ): A = F"Expected boolean as use_pascal parameter, found {type(UpperCamelCase )}" raise ValueError(UpperCamelCase ) A = input_str.split("_" ) A = 0 if use_pascal else 1 A = words[start_index:] A = [word[0].upper() + word[1:] for word in words_to_capitalize] A = "" if use_pascal else words[0] return "".join([initial_word, *capitalized_words] ) if __name__ == "__main__": from doctest import testmod testmod()
292
1
"""simple docstring""" def A__ ( UpperCamelCase = 600_851_475_143 ): try: A = int(UpperCamelCase ) except (TypeError, ValueError): raise TypeError("Parameter n must be int or castable to int." ) if n <= 0: raise ValueError("Parameter n must be greater than or equal to one." ) A = 2 A = 0 if n == 2: return 2 while n > 2: while n % i != 0: i += 1 A = i while n % i == 0: A = n // i i += 1 return int(UpperCamelCase ) if __name__ == "__main__": print(F"""{solution() = }""")
292
"""simple docstring""" from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) _snake_case : int = logging.get_logger(__name__) # pylint: disable=invalid-name _snake_case : List[Any] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... 
).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... 
).images\n\n >>> images[0].save("robot_cat.png")\n ```\n' def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase=8 ): A = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 A = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class _UpperCAmelCase ( lowercase_ ): def __init__( self :Any , __UpperCamelCase :UNetaDConditionModel , __UpperCamelCase :DDPMScheduler , __UpperCamelCase :VQModel , ): super().__init__() self.register_modules( unet=__UpperCamelCase , scheduler=__UpperCamelCase , movq=__UpperCamelCase , ) A = 2 ** (len(self.movq.config.block_out_channels ) - 1) def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Tuple , __UpperCamelCase :Dict , __UpperCamelCase :Dict , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] , __UpperCamelCase :List[str] ): if latents is None: A = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=__UpperCamelCase , dtype=__UpperCamelCase ) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" ) A = latents.to(__UpperCamelCase ) A = latents * scheduler.init_noise_sigma return latents def lowerCamelCase ( self :Tuple , __UpperCamelCase :Any=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) A = torch.device(f"cuda:{gpu_id}" ) A = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(__UpperCamelCase , __UpperCamelCase ) def lowerCamelCase ( self :Dict , __UpperCamelCase :int=0 ): if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." 
) A = torch.device(f"cuda:{gpu_id}" ) if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=__UpperCamelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) A = None for cpu_offloaded_model in [self.unet, self.movq]: A, A = cpu_offload_with_hook(__UpperCamelCase , __UpperCamelCase , prev_module_hook=__UpperCamelCase ) # We'll offload the last model manually. A = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def lowerCamelCase ( self :str ): if not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(__UpperCamelCase , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(__UpperCamelCase ) def __call__( self :List[Any] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :torch.FloatTensor , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 1_00 , __UpperCamelCase :float = 4.0 , __UpperCamelCase :int = 1 , __UpperCamelCase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , ): A = self._execution_device A = guidance_scale > 1.0 if isinstance(__UpperCamelCase , __UpperCamelCase ): A = torch.cat(__UpperCamelCase , dim=0 ) if isinstance(__UpperCamelCase , __UpperCamelCase ): A = torch.cat(__UpperCamelCase , dim=0 ) if isinstance(__UpperCamelCase , __UpperCamelCase ): A = torch.cat(__UpperCamelCase , dim=0 ) A = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: A = 
image_embeds.repeat_interleave(__UpperCamelCase , dim=0 ) A = negative_image_embeds.repeat_interleave(__UpperCamelCase , dim=0 ) A = hint.repeat_interleave(__UpperCamelCase , dim=0 ) A = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase ) A = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase ) self.scheduler.set_timesteps(__UpperCamelCase , device=__UpperCamelCase ) A = self.scheduler.timesteps A = self.movq.config.latent_channels A, A = downscale_height_and_width(__UpperCamelCase , __UpperCamelCase , self.movq_scale_factor ) # create initial latent A = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , self.scheduler , ) for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents A = {"image_embeds": image_embeds, "hint": hint} A = self.unet( sample=__UpperCamelCase , timestep=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , added_cond_kwargs=__UpperCamelCase , return_dict=__UpperCamelCase , )[0] if do_classifier_free_guidance: A, A = noise_pred.split(latents.shape[1] , dim=1 ) A, A = noise_pred.chunk(2 ) A, A = variance_pred.chunk(2 ) A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) A = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , "variance_type" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): A, A = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 A = self.scheduler.step( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase , )[0] # post-processing A = self.movq.decode(__UpperCamelCase , force_not_quantize=__UpperCamelCase )["sample"] if 
output_type not in ["pt", "np", "pil"]: raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" ) if output_type in ["np", "pil"]: A = image * 0.5 + 0.5 A = image.clamp(0 , 1 ) A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": A = self.numpy_to_pil(__UpperCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__UpperCamelCase )
292
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _snake_case : List[str] = { 'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : Optional[Any] = ['MobileViTFeatureExtractor'] _snake_case : List[str] = ['MobileViTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : List[Any] = [ 'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'MobileViTForImageClassification', 'MobileViTForSemanticSegmentation', 'MobileViTModel', 'MobileViTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : int = [ 'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFMobileViTForImageClassification', 'TFMobileViTForSemanticSegmentation', 'TFMobileViTModel', 'TFMobileViTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except 
OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys _snake_case : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
292
"""simple docstring""" import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _UpperCAmelCase : def __init__( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str]=13 , __UpperCamelCase :Any=30 , __UpperCamelCase :int=2 , __UpperCamelCase :Union[str, Any]=3 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :List[str]=32 , __UpperCamelCase :List[Any]=5 , __UpperCamelCase :Dict=4 , __UpperCamelCase :List[str]=37 , __UpperCamelCase :str="gelu" , __UpperCamelCase :Union[str, Any]=0.1 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Tuple=10 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :int=None , ): A = parent A = batch_size A = image_size A = patch_size A = num_channels A = is_training A = use_labels A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = type_sequence_label_size A = initializer_range A = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A = (image_size // patch_size) ** 2 A = num_patches + 1 def lowerCamelCase ( self :Any ): A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A = None if 
self.use_labels: A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self :Union[str, Any] ): return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def lowerCamelCase ( self :Dict , __UpperCamelCase :Dict , __UpperCamelCase :Any , __UpperCamelCase :Any ): A = ViTMSNModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Optional[Any] ): A = self.type_sequence_label_size A = ViTMSNForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = model(__UpperCamelCase , labels=__UpperCamelCase ) print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" ) print("Labels: {labels}" ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A = 1 A = ViTMSNForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase ( self :Optional[Any] ): A = self.prepare_config_and_inputs() A, A, A = config_and_inputs A = {"pixel_values": pixel_values} return config, inputs_dict 
@require_torch class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () UpperCamelCase = ( {'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification} if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def lowerCamelCase ( self :Optional[int] ): A = ViTMSNModelTester(self ) A = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 ) def lowerCamelCase ( self :Any ): self.config_tester.run_common_tests() @unittest.skip(reason="ViTMSN does not use inputs_embeds" ) def lowerCamelCase ( self :Union[str, Any] ): pass def lowerCamelCase ( self :int ): A, A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(__UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) ) def lowerCamelCase ( self :Tuple ): A, A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(__UpperCamelCase ) A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A = [*signature.parameters.keys()] A = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def lowerCamelCase ( self :List[str] ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def lowerCamelCase ( self :Dict ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase ) @slow def lowerCamelCase ( self :List[Any] ): for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = 
ViTMSNModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def A__ ( ): A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _UpperCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase ( self :Union[str, Any] ): return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None @slow def lowerCamelCase ( self :Any ): torch.manual_seed(2 ) A = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(__UpperCamelCase ) A = self.default_image_processor A = prepare_img() A = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): A = model(**__UpperCamelCase ) # verify the logits A = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __UpperCamelCase ) A = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
292
1
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _snake_case : str = logging.get_logger(__name__) _snake_case : Union[str, Any] = {'vocab_file': 'sentencepiece.bpe.model'} _snake_case : Tuple = { 'vocab_file': { 'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model', 'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model', 'moussaKam/barthez-orangesum-title': ( 'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model' ), }, } _snake_case : List[str] = { 'moussaKam/mbarthez': 1024, 'moussaKam/barthez': 1024, 'moussaKam/barthez-orangesum-title': 1024, } _snake_case : Any = '▁' class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = VOCAB_FILES_NAMES UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self :List[Any] , __UpperCamelCase :List[str] , __UpperCamelCase :Any="<s>" , __UpperCamelCase :Dict="</s>" , __UpperCamelCase :Union[str, Any]="</s>" , __UpperCamelCase :Optional[Any]="<s>" , __UpperCamelCase :Tuple="<unk>" , __UpperCamelCase :List[Any]="<pad>" , __UpperCamelCase :List[str]="<mask>" , __UpperCamelCase :Optional[Dict[str, Any]] = None , **__UpperCamelCase :int , ): # Mask token behave like a normal word, i.e. 
include the space before it A = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , ) A = vocab_file A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCamelCase ) ) A = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} A = len(self.sp_model ) - 1 A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def lowerCamelCase ( self :Any , __UpperCamelCase :List[int] , __UpperCamelCase :Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A = [self.cls_token_id] A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase ( self :List[Any] , __UpperCamelCase :List[int] , __UpperCamelCase :Optional[List[int]] = None , __UpperCamelCase :bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCamelCase )) + [1] return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) + [1] def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :List[int] , __UpperCamelCase :Optional[List[int]] = None ): A = [self.sep_token_id] A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowerCamelCase ( self :int ): return len(self.sp_model ) def lowerCamelCase ( self :str ): A = 
{self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCamelCase ( self :Dict , __UpperCamelCase :str ): return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase ) def lowerCamelCase ( self :str , __UpperCamelCase :Any ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] A = self.sp_model.PieceToId(__UpperCamelCase ) return spm_id if spm_id else self.unk_token_id def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Any ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(__UpperCamelCase ) def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Optional[Any] ): A = [] A = "" A = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__UpperCamelCase ) + token A = True A = [] else: current_sub_tokens.append(__UpperCamelCase ) A = False out_string += self.sp_model.decode(__UpperCamelCase ) return out_string.strip() def __getstate__( self :Tuple ): A = self.__dict__.copy() A = None return state def __setstate__( self :Dict , __UpperCamelCase :Union[str, Any] ): A = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): A = {} A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCamelCase ( self :Dict , __UpperCamelCase :str , __UpperCamelCase :Optional[str] = None ): if not os.path.isdir(__UpperCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return A = os.path.join( __UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ): 
copyfile(self.vocab_file , __UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCamelCase , "wb" ) as fi: A = self.sp_model.serialized_model_proto() fi.write(__UpperCamelCase ) return (out_vocab_file,)
292
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case : Optional[int] = logging.get_logger(__name__) _snake_case : Optional[int] = { 'google/vivit-b-16x2-kinetics400': ( 'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json' ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = '''vivit''' def __init__( self :Optional[Any] , __UpperCamelCase :Dict=2_24 , __UpperCamelCase :int=32 , __UpperCamelCase :Union[str, Any]=[2, 16, 16] , __UpperCamelCase :Optional[Any]=3 , __UpperCamelCase :Optional[Any]=7_68 , __UpperCamelCase :Any=12 , __UpperCamelCase :List[str]=12 , __UpperCamelCase :List[str]=30_72 , __UpperCamelCase :Any="gelu_fast" , __UpperCamelCase :List[Any]=0.0 , __UpperCamelCase :str=0.0 , __UpperCamelCase :Dict=0.02 , __UpperCamelCase :Optional[Any]=1e-06 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ): A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = num_frames A = tubelet_size A = num_channels A = qkv_bias super().__init__(**__UpperCamelCase )
292
1
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = 42 class _UpperCAmelCase ( lowercase_ , lowercase_ ): @register_to_config def __init__( self :int , __UpperCamelCase :int = 3 , __UpperCamelCase :int = 3 , __UpperCamelCase :Tuple[str] = ("DownEncoderBlock2D",) , __UpperCamelCase :Tuple[str] = ("UpDecoderBlock2D",) , __UpperCamelCase :Tuple[int] = (64,) , __UpperCamelCase :int = 1 , __UpperCamelCase :str = "silu" , __UpperCamelCase :int = 3 , __UpperCamelCase :int = 32 , __UpperCamelCase :int = 2_56 , __UpperCamelCase :int = 32 , __UpperCamelCase :Optional[int] = None , __UpperCamelCase :float = 0.18_215 , __UpperCamelCase :str = "group" , ): super().__init__() # pass init params to Encoder A = Encoder( in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , down_block_types=__UpperCamelCase , block_out_channels=__UpperCamelCase , layers_per_block=__UpperCamelCase , act_fn=__UpperCamelCase , norm_num_groups=__UpperCamelCase , double_z=__UpperCamelCase , ) A = vq_embed_dim if vq_embed_dim is not None else latent_channels A = nn.Convad(__UpperCamelCase , __UpperCamelCase , 1 ) A = VectorQuantizer(__UpperCamelCase , __UpperCamelCase , beta=0.25 , remap=__UpperCamelCase , sane_index_shape=__UpperCamelCase ) A = nn.Convad(__UpperCamelCase , __UpperCamelCase , 1 ) # pass init params to Decoder A = Decoder( in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , up_block_types=__UpperCamelCase , block_out_channels=__UpperCamelCase , layers_per_block=__UpperCamelCase , act_fn=__UpperCamelCase , norm_num_groups=__UpperCamelCase , norm_type=__UpperCamelCase , ) @apply_forward_hook def 
lowerCamelCase ( self :Any , __UpperCamelCase :torch.FloatTensor , __UpperCamelCase :bool = True ): A = self.encoder(__UpperCamelCase ) A = self.quant_conv(__UpperCamelCase ) if not return_dict: return (h,) return VQEncoderOutput(latents=__UpperCamelCase ) @apply_forward_hook def lowerCamelCase ( self :Dict , __UpperCamelCase :torch.FloatTensor , __UpperCamelCase :bool = False , __UpperCamelCase :bool = True ): # also go through quantization layer if not force_not_quantize: A, A, A = self.quantize(__UpperCamelCase ) else: A = h A = self.post_quant_conv(__UpperCamelCase ) A = self.decoder(__UpperCamelCase , quant if self.config.norm_type == "spatial" else None ) if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCamelCase ) def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :torch.FloatTensor , __UpperCamelCase :bool = True ): A = sample A = self.encode(__UpperCamelCase ).latents A = self.decode(__UpperCamelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCamelCase )
292
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _UpperCAmelCase ( lowercase_ , unittest.TestCase ): UpperCamelCase = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Union[str, Any]=0 ): A = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(__UpperCamelCase ) ) A = np.random.RandomState(__UpperCamelCase ) A = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def lowerCamelCase ( self :Any ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Dict ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert 
image.shape == (1, 1_28, 1_28, 3) A = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Optional[Any] ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) # warmup pass to apply optimizations A = pipe(**self.get_dummy_inputs() ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Dict ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Optional[Any] ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - 
expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Union[str, Any] ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): @property def lowerCamelCase ( self :Optional[Any] ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowerCamelCase ( self :Optional[int] ): A = ort.SessionOptions() A = False return options def lowerCamelCase ( self :Dict ): A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) A = init_image.resize((7_68, 5_12) ) # using the PNDM scheduler by default A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = "A fantasy landscape, trending on artstation" A = np.random.RandomState(0 ) A = pipe( prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , ) A = output.images A = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) A = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] ) # 
TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def lowerCamelCase ( self :Any ): A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) A = init_image.resize((7_68, 5_12) ) A = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" ) A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = "A fantasy landscape, trending on artstation" A = np.random.RandomState(0 ) A = pipe( prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , ) A = output.images A = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) A = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
292
1
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _snake_case : Tuple = logging.get_logger(__name__) _snake_case : str = { 'post_extract_proj': 'feature_projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.upsample.0': 'encoder.upsample.projection', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'layer_norm', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): for attribute in key.split("." ): A = getattr(UpperCamelCase , UpperCamelCase ) if weight_type is not None: A = getattr(UpperCamelCase , UpperCamelCase ).shape else: A = hf_pointer.shape assert hf_shape == value.shape, ( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": A = value elif weight_type == "weight_g": A = value elif weight_type == "weight_v": A = value elif weight_type == "bias": A = value else: A = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." 
) def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): A = [] A = fairseq_model.state_dict() A = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): A = False if "conv_layers" in name: load_conv_layer( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , hf_model.config.feat_extract_norm == "group" , ) A = True else: for key, mapped_key in MAPPING.items(): A = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: A = True if "*" in mapped_key: A = name.split(UpperCamelCase )[0].split("." )[-2] A = mapped_key.replace("*" , UpperCamelCase ) if "weight_g" in name: A = "weight_g" elif "weight_v" in name: A = "weight_v" elif "weight" in name: A = "weight" elif "bias" in name: A = "bias" else: A = None set_recursively(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) continue if not is_used: unused_weights.append(UpperCamelCase ) logger.warning(F"Unused weights: {unused_weights}" ) def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): A = full_name.split("conv_layers." )[-1] A = name.split("." ) A = int(items[0] ) A = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) A = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) A = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." 
) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) A = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) A = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(UpperCamelCase ) def A__ ( UpperCamelCase , UpperCamelCase ): A = SEWConfig() if is_finetuned: A = model.wav_encoder.wav_model.cfg else: A = model.cfg A = fs_config.conv_bias A = eval(fs_config.conv_feature_layers ) A = [x[0] for x in conv_layers] A = [x[1] for x in conv_layers] A = [x[2] for x in conv_layers] A = "gelu" A = "layer" if fs_config.extractor_mode == "layer_norm" else "group" A = 0.0 A = fs_config.activation_fn.name A = fs_config.encoder_embed_dim A = 0.02 A = fs_config.encoder_ffn_embed_dim A = 1E-5 A = fs_config.encoder_layerdrop A = fs_config.encoder_attention_heads A = fs_config.conv_pos_groups A = fs_config.conv_pos A = len(UpperCamelCase ) A = fs_config.encoder_layers A = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: A = model.cfg A = fs_config.final_dropout A = fs_config.layerdrop A = fs_config.activation_dropout A = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 A = fs_config.attention_dropout A = fs_config.dropout_input A = fs_config.dropout A = fs_config.mask_channel_length A = fs_config.mask_channel_prob A = fs_config.mask_length A = fs_config.mask_prob A = 
"Wav2Vec2FeatureExtractor" A = "Wav2Vec2CTCTokenizer" return config @torch.no_grad() def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=True ): if is_finetuned: A, A, A = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: A, A, A = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: A = SEWConfig.from_pretrained(UpperCamelCase ) else: A = convert_config(model[0] , UpperCamelCase ) A = model[0].eval() A = True if config.feat_extract_norm == "layer" else False A = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=UpperCamelCase , return_attention_mask=UpperCamelCase , ) if is_finetuned: if dict_path: A = Dictionary.load(UpperCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq A = target_dict.pad_index A = target_dict.bos_index A = target_dict.pad_index A = target_dict.bos_index A = target_dict.eos_index A = len(target_dict.symbols ) A = os.path.join(UpperCamelCase , "vocab.json" ) if not os.path.isdir(UpperCamelCase ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(UpperCamelCase ) ) return os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) with open(UpperCamelCase , "w" , encoding="utf-8" ) as vocab_handle: json.dump(target_dict.indices , UpperCamelCase ) A = WavaVecaCTCTokenizer( UpperCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=UpperCamelCase , ) A = WavaVecaProcessor(feature_extractor=UpperCamelCase , tokenizer=UpperCamelCase ) processor.save_pretrained(UpperCamelCase ) A = SEWForCTC(UpperCamelCase ) else: A = SEWModel(UpperCamelCase ) feature_extractor.save_pretrained(UpperCamelCase ) 
recursively_load_weights(UpperCamelCase , UpperCamelCase , UpperCamelCase ) hf_model.save_pretrained(UpperCamelCase ) if __name__ == "__main__": _snake_case : Optional[Any] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) _snake_case : Tuple = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
292
"""simple docstring""" def A__ ( UpperCamelCase ): A = generate_pascal_triangle(UpperCamelCase ) for row_idx in range(UpperCamelCase ): # Print left spaces for _ in range(num_rows - row_idx - 1 ): print(end=" " ) # Print row values for col_idx in range(row_idx + 1 ): if col_idx != row_idx: print(triangle[row_idx][col_idx] , end=" " ) else: print(triangle[row_idx][col_idx] , end="" ) print() def A__ ( UpperCamelCase ): if not isinstance(UpperCamelCase , UpperCamelCase ): raise TypeError("The input value of 'num_rows' should be 'int'" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( "The input value of 'num_rows' should be greater than or equal to 0" ) A = [] for current_row_idx in range(UpperCamelCase ): A = populate_current_row(UpperCamelCase , UpperCamelCase ) triangle.append(UpperCamelCase ) return triangle def A__ ( UpperCamelCase , UpperCamelCase ): A = [-1] * (current_row_idx + 1) # first and last elements of current row are equal to 1 A, A = 1, 1 for current_col_idx in range(1 , UpperCamelCase ): calculate_current_element( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) return current_row def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ): A = triangle[current_row_idx - 1][current_col_idx - 1] A = triangle[current_row_idx - 1][current_col_idx] A = above_to_left_elt + above_to_right_elt def A__ ( UpperCamelCase ): if not isinstance(UpperCamelCase , UpperCamelCase ): raise TypeError("The input value of 'num_rows' should be 'int'" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( "The input value of 'num_rows' should be greater than or equal to 0" ) A = [[1]] for row_index in range(1 , UpperCamelCase ): A = [0] + result[-1] + [0] A = row_index + 1 # Calculate the number of distinct elements in a row A = sum(divmod(UpperCamelCase , 2 ) ) A = [ temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 ) ] A = row_first_half[: (row_index + 1) // 2] 
row_second_half.reverse() A = row_first_half + row_second_half result.append(UpperCamelCase ) return result def A__ ( ): from collections.abc import Callable from timeit import timeit def benchmark_a_function(UpperCamelCase , UpperCamelCase ) -> None: A = F"{func.__name__}({value})" A = timeit(F"__main__.{call}" , setup="import __main__" ) # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds") print(F"{call:38} -- {timing:.4f} seconds" ) for value in range(15 ): # (1, 7, 14): for func in (generate_pascal_triangle, generate_pascal_triangle_optimized): benchmark_a_function(UpperCamelCase , UpperCamelCase ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
292
1
"""simple docstring""" from __future__ import annotations _snake_case : int = { 'A': ['B', 'C', 'E'], 'B': ['A', 'D', 'E'], 'C': ['A', 'F', 'G'], 'D': ['B'], 'E': ['A', 'B', 'D'], 'F': ['C'], 'G': ['C'], } class _UpperCAmelCase : def __init__( self :Any , __UpperCamelCase :dict[str, list[str]] , __UpperCamelCase :str ): A = graph # mapping node to its parent in resulting breadth first tree A = {} A = source_vertex def lowerCamelCase ( self :str ): A = {self.source_vertex} A = None A = [self.source_vertex] # first in first out queue while queue: A = queue.pop(0 ) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(__UpperCamelCase ) A = vertex queue.append(__UpperCamelCase ) def lowerCamelCase ( self :List[Any] , __UpperCamelCase :str ): if target_vertex == self.source_vertex: return self.source_vertex A = self.parent.get(__UpperCamelCase ) if target_vertex_parent is None: A = ( f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}" ) raise ValueError(__UpperCamelCase ) return self.shortest_path(__UpperCamelCase ) + f"->{target_vertex}" if __name__ == "__main__": _snake_case : List[str] = Graph(graph, 'G') g.breath_first_search() print(g.shortest_path('D')) print(g.shortest_path('G')) print(g.shortest_path('Foo'))
292
"""simple docstring""" import math import sys def A__ ( UpperCamelCase ): A = "" try: with open(UpperCamelCase , "rb" ) as binary_file: A = binary_file.read() for dat in data: A = F"{dat:08b}" result += curr_byte return result except OSError: print("File not accessible" ) sys.exit() def A__ ( UpperCamelCase ): A = {"0": "0", "1": "1"} A, A = "", "" A = len(UpperCamelCase ) for i in range(len(UpperCamelCase ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue A = lexicon[curr_string] result += last_match_id A = last_match_id + "0" if math.loga(UpperCamelCase ).is_integer(): A = {} for curr_key in list(UpperCamelCase ): A = lexicon.pop(UpperCamelCase ) A = new_lex A = last_match_id + "1" index += 1 A = "" return result def A__ ( UpperCamelCase , UpperCamelCase ): A = 8 try: with open(UpperCamelCase , "wb" ) as opened_file: A = [ to_write[i : i + byte_length] for i in range(0 , len(UpperCamelCase ) , UpperCamelCase ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append("10000000" ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(UpperCamelCase , 2 ).to_bytes(1 , byteorder="big" ) ) except OSError: print("File not accessible" ) sys.exit() def A__ ( UpperCamelCase ): A = 0 for letter in data_bits: if letter == "1": break counter += 1 A = data_bits[counter:] A = data_bits[counter + 1 :] return data_bits def A__ ( UpperCamelCase , UpperCamelCase ): A = read_file_binary(UpperCamelCase ) A = remove_prefix(UpperCamelCase ) A = decompress_data(UpperCamelCase ) write_file_binary(UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
292
1
"""simple docstring""" import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class _UpperCAmelCase : def lowerCamelCase ( self :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Any , __UpperCamelCase :Union[str, Any] ): return None class _UpperCAmelCase : def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Any , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Optional[int] , __UpperCamelCase :List[Any] ): return None class _UpperCAmelCase ( unittest.TestCase ): UpperCamelCase = [ # (model_name, model_kwargs) ('''bert-base-cased''', {}), ('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def lowerCamelCase ( self :Tuple ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase , "tf" , 12 , **__UpperCamelCase ) @require_torch @slow def lowerCamelCase ( self :List[str] ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase , "pt" , 12 , **__UpperCamelCase ) @require_torch @slow def lowerCamelCase ( self :str ): from transformers import BertModel A = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"] with NamedTemporaryFile(mode="w+t" ) as vocab_file: vocab_file.write("\n".join(__UpperCamelCase ) ) vocab_file.flush() A = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: A = BertModel(BertConfig(vocab_size=len(__UpperCamelCase ) ) ) model.save_pretrained(__UpperCamelCase ) self._test_export(__UpperCamelCase , "pt" , 12 , __UpperCamelCase ) @require_tf @slow def lowerCamelCase ( self :Dict ): for 
model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: A = self._test_export(__UpperCamelCase , "tf" , 12 , **__UpperCamelCase ) A = quantize(Path(__UpperCamelCase ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail("Quantized model is bigger than initial ONNX model" ) @require_torch @slow def lowerCamelCase ( self :List[Any] ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: A = self._test_export(__UpperCamelCase , "pt" , 12 , **__UpperCamelCase ) A = quantize(__UpperCamelCase ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail("Quantized model is bigger than initial ONNX model" ) def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :str , __UpperCamelCase :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :str=None , **__UpperCamelCase :int ): try: # Compute path with TemporaryDirectory() as tempdir: A = Path(__UpperCamelCase ).joinpath("model.onnx" ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ) return path except Exception as e: self.fail(__UpperCamelCase ) @require_torch @require_tokenizers @slow def lowerCamelCase ( self :List[Any] ): from transformers import BertModel A = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) ) A = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" ) self._test_infer_dynamic_axis(__UpperCamelCase , __UpperCamelCase , "pt" ) @require_tf @require_tokenizers @slow def lowerCamelCase ( self :List[Any] ): from transformers import TFBertModel A = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) ) A = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" ) 
self._test_infer_dynamic_axis(__UpperCamelCase , __UpperCamelCase , "tf" ) def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Optional[Any] ): A = FeatureExtractionPipeline(__UpperCamelCase , __UpperCamelCase ) A = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"] A, A, A, A = infer_shapes(__UpperCamelCase , __UpperCamelCase ) # Assert all variables are present self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] , __UpperCamelCase ) self.assertSequenceEqual(variable_names[3:] , __UpperCamelCase ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} ) self.assertDictEqual(shapes["output_1"] , {0: "batch"} ) def lowerCamelCase ( self :List[str] ): A = ["input_ids", "attention_mask", "token_type_ids"] A = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]} A, A = ensure_valid_input(FuncContiguousArgs() , __UpperCamelCase , __UpperCamelCase ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__UpperCamelCase ) , 3 ) # Should have exactly the same input names self.assertEqual(set(__UpperCamelCase ) , set(__UpperCamelCase ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__UpperCamelCase , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) A, A = ensure_valid_input(FuncNonContiguousArgs() , __UpperCamelCase , 
__UpperCamelCase ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__UpperCamelCase ) , 1 ) self.assertEqual(len(__UpperCamelCase ) , 1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] , tokens["input_ids"] ) self.assertEqual(ordered_input_names[0] , "input_ids" ) def lowerCamelCase ( self :Any ): A = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" ) self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
292
"""simple docstring""" class _UpperCAmelCase : def __init__( self :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Tuple ): A = name A = val def __str__( self :str ): return f"{self.__class__.__name__}({self.name}, {self.val})" def __lt__( self :List[Any] , __UpperCamelCase :Union[str, Any] ): return self.val < other.val class _UpperCAmelCase : def __init__( self :List[str] , __UpperCamelCase :Optional[Any] ): A = {} A = {} A = self.build_heap(__UpperCamelCase ) def __getitem__( self :int , __UpperCamelCase :Optional[int] ): return self.get_value(__UpperCamelCase ) def lowerCamelCase ( self :List[Any] , __UpperCamelCase :str ): return (idx - 1) // 2 def lowerCamelCase ( self :int , __UpperCamelCase :Optional[Any] ): return idx * 2 + 1 def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Optional[int] ): return idx * 2 + 2 def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :str ): return self.heap_dict[key] def lowerCamelCase ( self :int , __UpperCamelCase :Optional[Any] ): A = len(__UpperCamelCase ) - 1 A = self.get_parent_idx(__UpperCamelCase ) for idx, i in enumerate(__UpperCamelCase ): A = idx A = i.val for i in range(__UpperCamelCase , -1 , -1 ): self.sift_down(__UpperCamelCase , __UpperCamelCase ) return array def lowerCamelCase ( self :str , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Dict ): while True: A = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741 A = self.get_right_child_idx(__UpperCamelCase ) A = idx if l < len(__UpperCamelCase ) and array[l] < array[idx]: A = l if r < len(__UpperCamelCase ) and array[r] < array[smallest]: A = r if smallest != idx: A, A = array[smallest], array[idx] ( ( A ), ( A ), ) = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) A = smallest else: break def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Optional[int] ): A = self.get_parent_idx(__UpperCamelCase ) while p >= 0 and self.heap[p] > self.heap[idx]: A, A = self.heap[idx], 
self.heap[p] A, A = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) A = p A = self.get_parent_idx(__UpperCamelCase ) def lowerCamelCase ( self :Any ): return self.heap[0] def lowerCamelCase ( self :Tuple ): A, A = self.heap[-1], self.heap[0] A, A = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) A = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 , self.heap ) return x def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Optional[int] ): self.heap.append(__UpperCamelCase ) A = len(self.heap ) - 1 A = node.val self.sift_up(len(self.heap ) - 1 ) def lowerCamelCase ( self :Tuple ): return len(self.heap ) == 0 def lowerCamelCase ( self :Any , __UpperCamelCase :str , __UpperCamelCase :Dict ): assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" A = new_value A = new_value self.sift_up(self.idx_of_element[node] ) _snake_case : Optional[int] = Node('R', -1) _snake_case : Tuple = Node('B', 6) _snake_case : Tuple = Node('A', 3) _snake_case : Optional[int] = Node('X', 1) _snake_case : List[Any] = Node('E', 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array _snake_case : Tuple = MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print('Min Heap - before decrease key') for i in my_min_heap.heap: print(i) print('Min Heap - After decrease key of node [B -> -17]') my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
292
1
"""simple docstring""" import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class _UpperCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase ( self :Tuple ): for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(__UpperCamelCase ): A = AutoConfig.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) self.assertIsInstance(__UpperCamelCase , __UpperCamelCase ) A = FlaxAutoModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) self.assertIsInstance(__UpperCamelCase , __UpperCamelCase ) @slow def lowerCamelCase ( self :Union[str, Any] ): for model_name in ["roberta-base", "roberta-large"]: with self.subTest(__UpperCamelCase ): A = AutoConfig.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) self.assertIsInstance(__UpperCamelCase , __UpperCamelCase ) A = FlaxAutoModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) self.assertIsInstance(__UpperCamelCase , __UpperCamelCase ) @slow def lowerCamelCase ( self :str ): for model_name in ["bert-base-cased", "bert-large-uncased"]: A = AutoTokenizer.from_pretrained(__UpperCamelCase ) A = FlaxBertModel.from_pretrained(__UpperCamelCase ) A = tokenizer("Do you support jax jitted function?" 
, return_tensors=TensorType.JAX ) @jax.jit def eval(**__UpperCamelCase :int ): return model(**__UpperCamelCase ) eval(**__UpperCamelCase ).block_until_ready() @slow def lowerCamelCase ( self :List[str] ): for model_name in ["roberta-base", "roberta-large"]: A = AutoTokenizer.from_pretrained(__UpperCamelCase ) A = FlaxRobertaModel.from_pretrained(__UpperCamelCase ) A = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX ) @jax.jit def eval(**__UpperCamelCase :Dict ): return model(**__UpperCamelCase ) eval(**__UpperCamelCase ).block_until_ready() def lowerCamelCase ( self :Optional[Any] ): with self.assertRaisesRegex( __UpperCamelCase , "bert-base is not a local folder and is not a valid model identifier" ): A = FlaxAutoModel.from_pretrained("bert-base" ) def lowerCamelCase ( self :str ): with self.assertRaisesRegex( __UpperCamelCase , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): A = FlaxAutoModel.from_pretrained(__UpperCamelCase , revision="aaaaaa" ) def lowerCamelCase ( self :List[str] ): with self.assertRaisesRegex( __UpperCamelCase , "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack" , ): A = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" ) def lowerCamelCase ( self :Dict ): with self.assertRaisesRegex(__UpperCamelCase , "Use `from_pt=True` to load this model" ): A = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
292
"""simple docstring""" from __future__ import annotations _snake_case : str = [] def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): for i in range(len(UpperCamelCase ) ): if board[row][i] == 1: return False for i in range(len(UpperCamelCase ) ): if board[i][column] == 1: return False for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , -1 , -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , len(UpperCamelCase ) ) ): if board[i][j] == 1: return False return True def A__ ( UpperCamelCase , UpperCamelCase ): if row >= len(UpperCamelCase ): solution.append(UpperCamelCase ) printboard(UpperCamelCase ) print() return True for i in range(len(UpperCamelCase ) ): if is_safe(UpperCamelCase , UpperCamelCase , UpperCamelCase ): A = 1 solve(UpperCamelCase , row + 1 ) A = 0 return False def A__ ( UpperCamelCase ): for i in range(len(UpperCamelCase ) ): for j in range(len(UpperCamelCase ) ): if board[i][j] == 1: print("Q" , end=" " ) else: print("." , end=" " ) print() # n=int(input("The no. of queens")) _snake_case : List[str] = 8 _snake_case : List[str] = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print('The total no. of solutions are :', len(solution))
292
1
"""simple docstring""" import collections import importlib.util import os import re from pathlib import Path _snake_case : Optional[Any] = 'src/transformers' # Matches is_xxx_available() _snake_case : Optional[int] = re.compile(r'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} _snake_case : Union[str, Any] = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] _snake_case : Optional[Any] = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available _snake_case : Union[str, Any] = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") _snake_case : List[Any] = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] _snake_case : str = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", _snake_case : Optional[int] = re.compile('^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], _snake_case : Dict = re.compile('^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo _snake_case : Union[str, Any] = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: _snake_case : Tuple = re.compile(r'^\s*try:') # Catches a line with else: _snake_case : str = re.compile(r'^\s*else:') def A__ ( UpperCamelCase ): if _re_test_backend.search(UpperCamelCase ) is None: return None A = [b[0] for b in _re_backend.findall(UpperCamelCase )] backends.sort() return "_and_".join(UpperCamelCase ) def A__ ( UpperCamelCase ): with open(UpperCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f: A = f.readlines() A = 0 while line_index < len(UpperCamelCase ) and not lines[line_index].startswith("_import_structure = {" ): line_index += 1 # If this is a 
traditional init, just return. if line_index >= len(UpperCamelCase ): return None # First grab the objects without a specific backend in _import_structure A = [] while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None: A = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(UpperCamelCase ): A = _re_one_line_import_struct.search(UpperCamelCase ).groups()[0] A = re.findall("\[([^\]]+)\]" , UpperCamelCase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(", " )] ) line_index += 1 continue A = _re_import_struct_key_value.search(UpperCamelCase ) if single_line_import_search is not None: A = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(UpperCamelCase ) > 0] objects.extend(UpperCamelCase ) elif line.startswith(" " * 8 + "\"" ): objects.append(line[9:-3] ) line_index += 1 A = {"none": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("if TYPE_CHECKING" ): # If the line is an if not is_backend_available, we grab all objects associated. 
A = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: A = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 A = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ): A = lines[line_index] if _re_import_struct_add_one.search(UpperCamelCase ) is not None: objects.append(_re_import_struct_add_one.search(UpperCamelCase ).groups()[0] ) elif _re_import_struct_add_many.search(UpperCamelCase ) is not None: A = _re_import_struct_add_many.search(UpperCamelCase ).groups()[0].split(", " ) A = [obj[1:-1] for obj in imports if len(UpperCamelCase ) > 0] objects.extend(UpperCamelCase ) elif _re_between_brackets.search(UpperCamelCase ) is not None: A = _re_between_brackets.search(UpperCamelCase ).groups()[0].split(", " ) A = [obj[1:-1] for obj in imports if len(UpperCamelCase ) > 0] objects.extend(UpperCamelCase ) elif _re_quote_object.search(UpperCamelCase ) is not None: objects.append(_re_quote_object.search(UpperCamelCase ).groups()[0] ) elif line.startswith(" " * 8 + "\"" ): objects.append(line[9:-3] ) elif line.startswith(" " * 12 + "\"" ): objects.append(line[13:-3] ) line_index += 1 A = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend A = [] while ( line_index < len(UpperCamelCase ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("else" ) ): A = lines[line_index] A = _re_import.search(UpperCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(", " ) ) elif line.startswith(" " * 8 ): objects.append(line[8:-2] ) line_index += 1 A = {"none": objects} # Let's continue with backend-specific objects while 
line_index < len(UpperCamelCase ): # If the line is an if is_backend_available, we grab all objects associated. A = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: A = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 A = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ): A = lines[line_index] A = _re_import.search(UpperCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(", " ) ) elif line.startswith(" " * 12 ): objects.append(line[12:-2] ) line_index += 1 A = objects else: line_index += 1 return import_dict_objects, type_hint_objects def A__ ( UpperCamelCase , UpperCamelCase ): def find_duplicates(UpperCamelCase ): return [k for k, v in collections.Counter(UpperCamelCase ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] A = [] for key in import_dict_objects.keys(): A = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F"Duplicate _import_structure definitions for: {duplicate_imports}" ) A = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): A = "base imports" if key == "none" else F"{key} backend" errors.append(F"Differences for {name}:" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F" {a} in TYPE_HINT but not in _import_structure." 
) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F" {a} in _import_structure but not in TYPE_HINT." ) return errors def A__ ( ): A = [] for root, _, files in os.walk(UpperCamelCase ): if "__init__.py" in files: A = os.path.join(UpperCamelCase , "__init__.py" ) A = parse_init(UpperCamelCase ) if objects is not None: A = analyze_results(*UpperCamelCase ) if len(UpperCamelCase ) > 0: A = F"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}" failures.append("\n".join(UpperCamelCase ) ) if len(UpperCamelCase ) > 0: raise ValueError("\n\n".join(UpperCamelCase ) ) def A__ ( ): A = [] for path, directories, files in os.walk(UpperCamelCase ): for folder in directories: # Ignore private modules if folder.startswith("_" ): directories.remove(UpperCamelCase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(UpperCamelCase ) / folder).glob("*.py" ) ) ) == 0: continue A = str((Path(UpperCamelCase ) / folder).relative_to(UpperCamelCase ) ) A = short_path.replace(os.path.sep , "." ) submodules.append(UpperCamelCase ) for fname in files: if fname == "__init__.py": continue A = str((Path(UpperCamelCase ) / fname).relative_to(UpperCamelCase ) ) A = short_path.replace(".py" , "" ).replace(os.path.sep , "." ) if len(submodule.split("." ) ) == 1: submodules.append(UpperCamelCase ) return submodules _snake_case : Tuple = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', ] def A__ ( ): # This is to make sure the transformers module imported is the one in the repo. 
A = importlib.util.spec_from_file_location( "transformers" , os.path.join(UpperCamelCase , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) A = spec.loader.load_module() A = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(UpperCamelCase ) > 0: A = "\n".join(F"- {module}" for module in module_not_registered ) raise ValueError( "The following submodules are not properly registered in the main init of Transformers:\n" F"{list_of_modules}\n" "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." ) if __name__ == "__main__": check_all_inits() check_submodules()
292
"""simple docstring""" import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class _UpperCAmelCase : @staticmethod def lowerCamelCase ( *__UpperCamelCase :List[Any] , **__UpperCamelCase :List[Any] ): pass def A__ ( UpperCamelCase ): A = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class _UpperCAmelCase ( unittest.TestCase ): UpperCamelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] ): A = DepthEstimationPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase ( self :Dict , __UpperCamelCase :Optional[int] , __UpperCamelCase :Optional[Any] ): A = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" ) self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , __UpperCamelCase ) import datasets A = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" ) A = depth_estimator( [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] ) self.assertEqual( [ {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), 
"depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, ] , __UpperCamelCase , ) @require_tf @unittest.skip("Depth estimation is not implemented in TF" ) def lowerCamelCase ( self :Optional[Any] ): pass @slow @require_torch def lowerCamelCase ( self :Optional[Any] ): A = "Intel/dpt-large" A = pipeline("depth-estimation" , model=__UpperCamelCase ) A = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" ) A = hashimage(outputs["depth"] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 ) @require_torch def lowerCamelCase ( self :Optional[Any] ): # This is highly irregular to have no small tests. self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
292
1
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys _snake_case : Dict = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8') _snake_case : int = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split() _snake_case : str = '|'.join(sys.argv[1:]) _snake_case : Union[str, Any] = re.compile(rF"""^({joined_dirs}).*?\.py$""") _snake_case : List[Any] = [x for x in modified_files if regex.match(x)] print(' '.join(relevant_modified_files), end='')
292
"""simple docstring""" from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _UpperCAmelCase : UpperCamelCase = PegasusConfig UpperCamelCase = {} UpperCamelCase = '''gelu''' def __init__( self :Union[str, Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :str=13 , __UpperCamelCase :List[Any]=7 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :List[Any]=False , __UpperCamelCase :Any=99 , __UpperCamelCase :Tuple=32 , __UpperCamelCase :Optional[int]=2 , __UpperCamelCase :Optional[Any]=4 , __UpperCamelCase :Tuple=37 , __UpperCamelCase :Optional[Any]=0.1 , __UpperCamelCase :Tuple=0.1 , __UpperCamelCase :Optional[int]=40 , __UpperCamelCase :Tuple=2 , __UpperCamelCase :Dict=1 , __UpperCamelCase :Any=0 , ): A = parent A = batch_size A = seq_length A = is_training A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = eos_token_id A = pad_token_id A = bos_token_id def lowerCamelCase ( self :Tuple ): A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A = tf.concat([input_ids, eos_tensor] , axis=1 ) A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , 
encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) A = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) return config, inputs_dict def lowerCamelCase ( self :str , __UpperCamelCase :str , __UpperCamelCase :Union[str, Any] ): A = TFPegasusModel(config=__UpperCamelCase ).get_decoder() A = inputs_dict["input_ids"] A = input_ids[:1, :] A = inputs_dict["attention_mask"][:1, :] A = inputs_dict["head_mask"] A = 1 # first forward pass A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , head_mask=__UpperCamelCase , use_cache=__UpperCamelCase ) A, A = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A = ids_tensor((self.batch_size, 3) , config.vocab_size ) A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A = tf.concat([input_ids, next_tokens] , axis=-1 ) A = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0] A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A = output_from_no_past[:, -3:, random_slice_idx] A = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__UpperCamelCase , __UpperCamelCase , rtol=1e-3 ) def A__ ( 
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , ): if attention_mask is None: A = tf.cast(tf.math.not_equal(UpperCamelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: A = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: A = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): UpperCamelCase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () UpperCamelCase = (TFPegasusForConditionalGeneration,) if is_tf_available() else () UpperCamelCase = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) UpperCamelCase = True UpperCamelCase = False UpperCamelCase = False def lowerCamelCase ( self :int ): A = TFPegasusModelTester(self ) A = ConfigTester(self , config_class=__UpperCamelCase ) def lowerCamelCase ( self :Dict ): self.config_tester.run_common_tests() def lowerCamelCase ( self :Any ): A = self.model_tester.prepare_config_and_inputs_for_common() 
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCamelCase ) @require_sentencepiece @require_tokenizers @require_tf class _UpperCAmelCase ( unittest.TestCase ): UpperCamelCase = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for 
their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] UpperCamelCase = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers UpperCamelCase = '''google/pegasus-xsum''' @cached_property def lowerCamelCase ( self :Any ): return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCamelCase ( self :Dict ): A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCamelCase ( self :str , **__UpperCamelCase :str ): A = self.translate_src_text(**__UpperCamelCase ) assert self.expected_text == generated_words def lowerCamelCase ( self :Any , **__UpperCamelCase :List[str] ): A = self.tokenizer(self.src_text , **__UpperCamelCase , padding=__UpperCamelCase , return_tensors="tf" ) A = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCamelCase , ) A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCamelCase ) return generated_words @slow def lowerCamelCase ( self :Union[str, Any] ): self._assert_generated_batch_equal_expected()
292
1
"""simple docstring""" def A__ ( UpperCamelCase , UpperCamelCase ): return int((input_a, input_a).count(1 ) != 0 ) def A__ ( ): assert or_gate(0 , 0 ) == 0 assert or_gate(0 , 1 ) == 1 assert or_gate(1 , 0 ) == 1 assert or_gate(1 , 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
292
"""simple docstring""" from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def A__ ( UpperCamelCase = "laptop" ): A = F"https://www.amazon.in/laptop/s?k={product}" A = { "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36", "Accept-Language": "en-US, en;q=0.5", } A = BeautifulSoup(requests.get(UpperCamelCase , headers=UpperCamelCase ).text ) # Initialize a Pandas dataframe with the column titles A = DataFrame( columns=[ "Product Title", "Product Link", "Current Price of the product", "Product Rating", "MRP of the product", "Discount", ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( "div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ): try: A = item.ha.text A = "https://www.amazon.in/" + item.ha.a["href"] A = item.find("span" , attrs={"class": "a-offscreen"} ).text try: A = item.find("span" , attrs={"class": "a-icon-alt"} ).text except AttributeError: A = "Not available" try: A = ( "₹" + item.find( "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1] ) except AttributeError: A = "" try: A = float( ( ( float(product_mrp.strip("₹" ).replace("," , "" ) ) - float(product_price.strip("₹" ).replace("," , "" ) ) ) / float(product_mrp.strip("₹" ).replace("," , "" ) ) ) * 100 ) except ValueError: A = float("nan" ) except AttributeError: pass A = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] A = " " A = " " data_frame.index += 1 return data_frame if __name__ == "__main__": _snake_case : Optional[int] = 'headphones' get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
292
1
"""simple docstring""" import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _UpperCAmelCase : def __init__( self :List[Any] , __UpperCamelCase :Any , __UpperCamelCase :Any=13 , __UpperCamelCase :List[Any]=32 , __UpperCamelCase :Union[str, Any]=2 , __UpperCamelCase :Dict=3 , __UpperCamelCase :Tuple=16 , __UpperCamelCase :Any=[1, 2, 1] , __UpperCamelCase :Optional[int]=[2, 2, 4] , __UpperCamelCase :Dict=2 , __UpperCamelCase :Optional[int]=2.0 , __UpperCamelCase :str=True , __UpperCamelCase :Tuple=0.0 , __UpperCamelCase :Optional[int]=0.0 , __UpperCamelCase :str=0.1 , __UpperCamelCase :str="gelu" , __UpperCamelCase :str=False , __UpperCamelCase :str=True , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :List[str]=1e-5 , __UpperCamelCase :Dict=True , __UpperCamelCase :Optional[Any]=None , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :Dict=10 , __UpperCamelCase :Optional[int]=8 , ): A = parent A = batch_size A = image_size A = patch_size A = num_channels A = embed_dim A = depths A = num_heads A = window_size A = mlp_ratio A = qkv_bias A = hidden_dropout_prob A = attention_probs_dropout_prob A = drop_path_rate A = hidden_act A = use_absolute_embeddings A = patch_norm A = layer_norm_eps A = 
initializer_range A = is_training A = scope A = use_labels A = type_sequence_label_size A = encoder_stride def lowerCamelCase ( self :str ): A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A = None if self.use_labels: A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self :Any ): return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowerCamelCase ( self :Any , __UpperCamelCase :Any , __UpperCamelCase :int , __UpperCamelCase :int ): A = SwinvaModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = model(__UpperCamelCase ) A = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) A = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowerCamelCase ( self :List[str] , __UpperCamelCase :str , __UpperCamelCase :str , __UpperCamelCase :Any ): A = SwinvaForMaskedImageModeling(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = model(__UpperCamelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A = 1 A = SwinvaForMaskedImageModeling(__UpperCamelCase ) 
model.to(__UpperCamelCase ) model.eval() A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :List[Any] , __UpperCamelCase :Optional[int] , __UpperCamelCase :Union[str, Any] ): A = self.type_sequence_label_size A = SwinvaForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = model(__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase ( self :Any ): A = self.prepare_config_and_inputs() A, A, A = config_and_inputs A = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): UpperCamelCase = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) UpperCamelCase = ( {'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification} if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def lowerCamelCase ( self :Union[str, Any] ): A = SwinvaModelTester(self ) A = ConfigTester(self , config_class=__UpperCamelCase , embed_dim=37 ) def lowerCamelCase ( self :str ): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase ( self :Dict ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) @unittest.skip(reason="Got 
`CUDA error: misaligned address` with PyTorch 2.0.0." ) def lowerCamelCase ( self :Union[str, Any] ): pass @unittest.skip(reason="Swinv2 does not use inputs_embeds" ) def lowerCamelCase ( self :Optional[Any] ): pass def lowerCamelCase ( self :Optional[int] ): A, A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(__UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) ) def lowerCamelCase ( self :Optional[int] ): A, A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(__UpperCamelCase ) A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A = [*signature.parameters.keys()] A = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def lowerCamelCase ( self :Any ): A, A = self.model_tester.prepare_config_and_inputs_for_common() A = True for model_class in self.all_model_classes: A = True A = False A = True A = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): A = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) A = outputs.attentions A = len(self.model_tester.depths ) self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) # check that output_attentions also work using config del inputs_dict["output_attentions"] A = True A = config.window_size**2 A = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): A = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) A = outputs.attentions self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) A = 
len(__UpperCamelCase ) # Check attention is always last and order is fine A = True A = True A = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): A = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) if hasattr(self.model_tester , "num_hidden_states_types" ): A = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states A = 2 self.assertEqual(out_len + added_hidden_states , len(__UpperCamelCase ) ) A = outputs.attentions self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def lowerCamelCase ( self :List[str] , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Any , __UpperCamelCase :List[str] , __UpperCamelCase :List[str] ): A = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): A = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) A = outputs.hidden_states A = getattr( self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) # Swinv2 has a different seq_length A = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) A = outputs.reshaped_hidden_states self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) A, A, A, A = reshaped_hidden_states[0].shape A = ( reshaped_hidden_states[0].view(__UpperCamelCase , __UpperCamelCase , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowerCamelCase ( self :Optional[Any] 
): A, A = self.model_tester.prepare_config_and_inputs_for_common() A = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: A = True self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A = True self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def lowerCamelCase ( self :List[Any] ): A, A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) A = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) A = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) A = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: A = True self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A = True self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , (padded_height, padded_width) ) def lowerCamelCase ( self :Any ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCamelCase ) def lowerCamelCase ( self :List[str] ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase ) @slow def lowerCamelCase ( self :Tuple ): for model_name in 
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = SwinvaModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def lowerCamelCase ( self :Optional[Any] ): A, A = self.model_tester.prepare_config_and_inputs_for_common() A = _config_zero_init(__UpperCamelCase ) for model_class in self.all_model_classes: A = model_class(config=__UpperCamelCase ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , ) @require_vision @require_torch class _UpperCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase ( self :Union[str, Any] ): return ( AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ) if is_vision_available() else None ) @slow def lowerCamelCase ( self :List[str] ): A = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ).to( __UpperCamelCase ) A = self.default_image_processor A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) A = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): A = model(**__UpperCamelCase ) # verify the logits A = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __UpperCamelCase ) A = torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
292
"""simple docstring""" import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging _snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name class _UpperCAmelCase ( lowercase_ ): def __init__( self :Dict , __UpperCamelCase :WhisperForConditionalGeneration , __UpperCamelCase :WhisperProcessor , __UpperCamelCase :AutoencoderKL , __UpperCamelCase :CLIPTextModel , __UpperCamelCase :CLIPTokenizer , __UpperCamelCase :UNetaDConditionModel , __UpperCamelCase :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __UpperCamelCase :StableDiffusionSafetyChecker , __UpperCamelCase :CLIPImageProcessor , ): super().__init__() if safety_checker is None: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
) self.register_modules( speech_model=__UpperCamelCase , speech_processor=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , feature_extractor=__UpperCamelCase , ) def lowerCamelCase ( self :Any , __UpperCamelCase :Optional[Union[str, int]] = "auto" ): if slice_size == "auto": A = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__UpperCamelCase ) def lowerCamelCase ( self :Tuple ): self.enable_attention_slicing(__UpperCamelCase ) @torch.no_grad() def __call__( self :Optional[Any] , __UpperCamelCase :Any , __UpperCamelCase :Dict=1_60_00 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 50 , __UpperCamelCase :float = 7.5 , __UpperCamelCase :Optional[Union[str, List[str]]] = None , __UpperCamelCase :Optional[int] = 1 , __UpperCamelCase :float = 0.0 , __UpperCamelCase :Optional[torch.Generator] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , __UpperCamelCase :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase :int = 1 , **__UpperCamelCase :Dict , ): A = self.speech_processor.feature_extractor( __UpperCamelCase , return_tensors="pt" , sampling_rate=__UpperCamelCase ).input_features.to(self.device ) A = self.speech_model.generate(__UpperCamelCase , max_length=48_00_00 ) A = self.speech_processor.tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase , normalize=__UpperCamelCase )[ 0 ] if isinstance(__UpperCamelCase , __UpperCamelCase ): A = 1 elif isinstance(__UpperCamelCase , __UpperCamelCase ): A = len(__UpperCamelCase ) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." 
) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(__UpperCamelCase )}." ) # get prompt text embeddings A = self.tokenizer( __UpperCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , ) A = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) A = text_input_ids[:, : self.tokenizer.model_max_length] A = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method A, A, A = text_embeddings.shape A = text_embeddings.repeat(1 , __UpperCamelCase , 1 ) A = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. A = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: A = 42 if negative_prompt is None: A = [""] * batch_size elif type(__UpperCamelCase ) is not type(__UpperCamelCase ): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !=" f" {type(__UpperCamelCase )}." 
) elif isinstance(__UpperCamelCase , __UpperCamelCase ): A = [negative_prompt] elif batch_size != len(__UpperCamelCase ): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: A = negative_prompt A = text_input_ids.shape[-1] A = self.tokenizer( __UpperCamelCase , padding="max_length" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="pt" , ) A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method A = uncond_embeddings.shape[1] A = uncond_embeddings.repeat(1 , __UpperCamelCase , 1 ) A = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes A = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
A = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) A = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="cpu" , dtype=__UpperCamelCase ).to( self.device ) else: A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase ) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" ) A = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(__UpperCamelCase ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand A = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler A = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] A = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) A = {} if accepts_eta: A = eta for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents A = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase ) # predict the noise residual A = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample # perform guidance if do_classifier_free_guidance: A, A = noise_pred.chunk(2 ) A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 A = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) A = 1 / 0.18_215 * latents A = self.vae.decode(__UpperCamelCase ).sample A = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": A = self.numpy_to_pil(__UpperCamelCase ) if not return_dict: return image return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
292
1
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL _snake_case : Optional[int] = logging.get_logger(__name__) def A__ ( UpperCamelCase ): if isinstance(UpperCamelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(UpperCamelCase , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(UpperCamelCase ): return [[videos]] raise ValueError(F"Could not make batched video from {videos}" ) class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = ['''pixel_values'''] def __init__( self :Optional[Any] , __UpperCamelCase :bool = True , __UpperCamelCase :Dict[str, int] = None , __UpperCamelCase :PILImageResampling = PILImageResampling.BILINEAR , __UpperCamelCase :bool = True , __UpperCamelCase :Dict[str, int] = None , __UpperCamelCase :bool = True , __UpperCamelCase :Union[int, float] = 1 / 2_55 , __UpperCamelCase :bool = True , __UpperCamelCase :bool = True , __UpperCamelCase :Optional[Union[float, List[float]]] = None , __UpperCamelCase :Optional[Union[float, List[float]]] = None , **__UpperCamelCase :str , ): super().__init__(**__UpperCamelCase ) A = size if size is not None else {"shortest_edge": 2_56} A = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase ) A = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24} A = 
get_size_dict(__UpperCamelCase , param_name="crop_size" ) A = do_resize A = size A = do_center_crop A = crop_size A = resample A = do_rescale A = rescale_factor A = offset A = do_normalize A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCamelCase ( self :str , __UpperCamelCase :np.ndarray , __UpperCamelCase :Dict[str, int] , __UpperCamelCase :PILImageResampling = PILImageResampling.BILINEAR , __UpperCamelCase :Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase :Dict , ): A = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase ) if "shortest_edge" in size: A = get_resize_output_image_size(__UpperCamelCase , size["shortest_edge"] , default_to_square=__UpperCamelCase ) elif "height" in size and "width" in size: A = (size["height"], size["width"]) else: raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" ) return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowerCamelCase ( self :str , __UpperCamelCase :np.ndarray , __UpperCamelCase :Dict[str, int] , __UpperCamelCase :Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase :str , ): A = get_size_dict(__UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f"Size must have 'height' and 'width' as keys. 
Got {size.keys()}" ) return center_crop(__UpperCamelCase , size=(size["height"], size["width"]) , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :np.ndarray , __UpperCamelCase :Union[int, float] , __UpperCamelCase :bool = True , __UpperCamelCase :Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase :List[str] , ): A = image.astype(np.floataa ) if offset: A = image - (scale / 2) return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowerCamelCase ( self :List[str] , __UpperCamelCase :np.ndarray , __UpperCamelCase :Union[float, List[float]] , __UpperCamelCase :Union[float, List[float]] , __UpperCamelCase :Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase :List[str] , ): return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :ImageInput , __UpperCamelCase :bool = None , __UpperCamelCase :Dict[str, int] = None , __UpperCamelCase :PILImageResampling = None , __UpperCamelCase :bool = None , __UpperCamelCase :Dict[str, int] = None , __UpperCamelCase :bool = None , __UpperCamelCase :float = None , __UpperCamelCase :bool = None , __UpperCamelCase :bool = None , __UpperCamelCase :Optional[Union[float, List[float]]] = None , __UpperCamelCase :Optional[Union[float, List[float]]] = None , __UpperCamelCase :Optional[ChannelDimension] = ChannelDimension.FIRST , ): if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." 
) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) if offset and not do_rescale: raise ValueError("For offset, do_rescale must also be set to True." ) # All transformations expect numpy arrays. A = to_numpy_array(__UpperCamelCase ) if do_resize: A = self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) if do_center_crop: A = self.center_crop(__UpperCamelCase , size=__UpperCamelCase ) if do_rescale: A = self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase , offset=__UpperCamelCase ) if do_normalize: A = self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) A = to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) return image def lowerCamelCase ( self :str , __UpperCamelCase :ImageInput , __UpperCamelCase :bool = None , __UpperCamelCase :Dict[str, int] = None , __UpperCamelCase :PILImageResampling = None , __UpperCamelCase :bool = None , __UpperCamelCase :Dict[str, int] = None , __UpperCamelCase :bool = None , __UpperCamelCase :float = None , __UpperCamelCase :bool = None , __UpperCamelCase :bool = None , __UpperCamelCase :Optional[Union[float, List[float]]] = None , __UpperCamelCase :Optional[Union[float, List[float]]] = None , __UpperCamelCase :Optional[Union[str, TensorType]] = None , __UpperCamelCase :ChannelDimension = ChannelDimension.FIRST , **__UpperCamelCase :Tuple , ): A = do_resize if do_resize is not None else self.do_resize A = resample if resample is not None else self.resample A = do_center_crop if do_center_crop is not None else self.do_center_crop A = do_rescale if do_rescale is not None else self.do_rescale A = rescale_factor if rescale_factor is not None else self.rescale_factor A = offset if offset is not None else self.offset A = do_normalize if do_normalize is not None else self.do_normalize A = image_mean if image_mean is not None else self.image_mean A = 
image_std if image_std is not None else self.image_std A = size if size is not None else self.size A = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase ) A = crop_size if crop_size is not None else self.crop_size A = get_size_dict(__UpperCamelCase , param_name="crop_size" ) if not valid_images(__UpperCamelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) A = make_batched(__UpperCamelCase ) A = [ [ self._preprocess_image( image=__UpperCamelCase , do_resize=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , do_center_crop=__UpperCamelCase , crop_size=__UpperCamelCase , do_rescale=__UpperCamelCase , rescale_factor=__UpperCamelCase , offset=__UpperCamelCase , do_normalize=__UpperCamelCase , image_mean=__UpperCamelCase , image_std=__UpperCamelCase , data_format=__UpperCamelCase , ) for img in video ] for video in videos ] A = {"pixel_values": videos} return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
292
"""simple docstring""" _snake_case : Optional[int] = [ 'DownloadConfig', 'DownloadManager', 'DownloadMode', 'StreamingDownloadManager', ] from .download_config import DownloadConfig from .download_manager import DownloadManager, DownloadMode from .streaming_download_manager import StreamingDownloadManager
292
1
"""simple docstring""" import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class _UpperCAmelCase ( lowercase_ ): def __init__( self :str , __UpperCamelCase :NestedDataStructureLike[PathLike] , __UpperCamelCase :Optional[NamedSplit] = None , __UpperCamelCase :Optional[Features] = None , __UpperCamelCase :str = None , __UpperCamelCase :bool = False , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[str] = None , __UpperCamelCase :Optional[int] = None , **__UpperCamelCase :Optional[Any] , ): super().__init__( __UpperCamelCase , split=__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase , streaming=__UpperCamelCase , num_proc=__UpperCamelCase , **__UpperCamelCase , ) A = field A = path_or_paths if isinstance(__UpperCamelCase , __UpperCamelCase ) else {self.split: path_or_paths} A = Json( cache_dir=__UpperCamelCase , data_files=__UpperCamelCase , features=__UpperCamelCase , field=__UpperCamelCase , **__UpperCamelCase , ) def lowerCamelCase ( self :Optional[Any] ): # Build iterable dataset if self.streaming: A = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: A = None A = None A = None A = None self.builder.download_and_prepare( download_config=__UpperCamelCase , download_mode=__UpperCamelCase , verification_mode=__UpperCamelCase , base_path=__UpperCamelCase , num_proc=self.num_proc , ) A = self.builder.as_dataset( split=self.split , verification_mode=__UpperCamelCase , in_memory=self.keep_in_memory ) return dataset class _UpperCAmelCase : def __init__( self :Any , __UpperCamelCase :Dataset , __UpperCamelCase :Union[PathLike, BinaryIO] , __UpperCamelCase 
:Optional[int] = None , __UpperCamelCase :Optional[int] = None , **__UpperCamelCase :Optional[Any] , ): if num_proc is not None and num_proc <= 0: raise ValueError(f"num_proc {num_proc} must be an integer > 0." ) A = dataset A = path_or_buf A = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE A = num_proc A = "utf-8" A = to_json_kwargs def lowerCamelCase ( self :List[Any] ): A = self.to_json_kwargs.pop("path_or_buf" , __UpperCamelCase ) A = self.to_json_kwargs.pop("orient" , "records" ) A = self.to_json_kwargs.pop("lines" , True if orient == "records" else False ) A = self.to_json_kwargs.pop("index" , False if orient in ["split", "table"] else True ) A = self.to_json_kwargs.pop("compression" , __UpperCamelCase ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(f"`datasets` currently does not support {compression} compression" ) if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf , "wb" , compression=__UpperCamelCase ) as buffer: A = self._write(file_obj=__UpperCamelCase , orient=__UpperCamelCase , lines=__UpperCamelCase , index=__UpperCamelCase , **self.to_json_kwargs ) else: if compression: raise NotImplementedError( f"The compression parameter is not supported when writing to a buffer, but compression={compression}" " was passed. Please provide a local path instead." 
) A = self._write( file_obj=self.path_or_buf , orient=__UpperCamelCase , lines=__UpperCamelCase , index=__UpperCamelCase , **self.to_json_kwargs ) return written def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Optional[Any] ): A, A, A, A, A = args A = query_table( table=self.dataset.data , key=slice(__UpperCamelCase , offset + self.batch_size ) , indices=self.dataset._indices , ) A = batch.to_pandas().to_json( path_or_buf=__UpperCamelCase , orient=__UpperCamelCase , lines=__UpperCamelCase , index=__UpperCamelCase , **__UpperCamelCase ) if not json_str.endswith("\n" ): json_str += "\n" return json_str.encode(self.encoding ) def lowerCamelCase ( self :Any , __UpperCamelCase :BinaryIO , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :str , __UpperCamelCase :Dict , **__UpperCamelCase :int , ): A = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ): A = self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(__UpperCamelCase ) else: A, A = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , __UpperCamelCase , __UpperCamelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ): written += file_obj.write(__UpperCamelCase ) return written
292
"""simple docstring""" import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def A__ ( UpperCamelCase ): A = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(UpperCamelCase , UpperCamelCase ) def A__ ( UpperCamelCase ): A = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: A = s_dict.pop(UpperCamelCase ) elif "subsample" in key: A = s_dict.pop(UpperCamelCase ) def A__ ( UpperCamelCase ): A, A = emb.weight.shape A = nn.Linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase ) A = emb.weight.data return lin_layer def A__ ( UpperCamelCase , UpperCamelCase ): A = torch.load(UpperCamelCase , map_location="cpu" ) A = mam_aaa["args"] A = mam_aaa["model"] A = state_dict["decoder.output_projection.weight"] remove_ignore_keys_(UpperCamelCase ) rename_keys(UpperCamelCase ) A = state_dict["decoder.embed_tokens.weight"].shape[0] A = args.share_decoder_input_output_embed A = [int(UpperCamelCase ) for i in args.conv_kernel_sizes.split("," )] A = SpeechaTextConfig( vocab_size=UpperCamelCase , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(UpperCamelCase ) , conv_channels=args.conv_channels , conv_kernel_sizes=UpperCamelCase , 
input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=UpperCamelCase , num_beams=5 , max_length=200 , use_cache=UpperCamelCase , decoder_start_token_id=2 , early_stopping=UpperCamelCase , ) A = SpeechaTextForConditionalGeneration(UpperCamelCase ) A, A = model.model.load_state_dict(UpperCamelCase , strict=UpperCamelCase ) if len(UpperCamelCase ) > 0 and not set(UpperCamelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing," F" but all the following weights are missing {missing}" ) if tie_embeds: A = make_linear_from_emb(model.model.decoder.embed_tokens ) else: A = lm_head_weights model.save_pretrained(UpperCamelCase ) if __name__ == "__main__": _snake_case : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') _snake_case : str = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
292
1
"""simple docstring""" import math import flax.linen as nn import jax.numpy as jnp def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase = 1 , UpperCamelCase = 1 , UpperCamelCase = 1.0E4 , UpperCamelCase = False , UpperCamelCase = 1.0 , ): assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, F"Embedding dimension {embedding_dim} should be even" A = float(embedding_dim // 2 ) A = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) A = min_timescale * jnp.exp(jnp.arange(UpperCamelCase , dtype=jnp.floataa ) * -log_timescale_increment ) A = jnp.expand_dims(UpperCamelCase , 1 ) * jnp.expand_dims(UpperCamelCase , 0 ) # scale embeddings A = scale * emb if flip_sin_to_cos: A = jnp.concatenate([jnp.cos(UpperCamelCase ), jnp.sin(UpperCamelCase )] , axis=1 ) else: A = jnp.concatenate([jnp.sin(UpperCamelCase ), jnp.cos(UpperCamelCase )] , axis=1 ) A = jnp.reshape(UpperCamelCase , [jnp.shape(UpperCamelCase )[0], embedding_dim] ) return signal class _UpperCAmelCase ( nn.Module ): UpperCamelCase = 3_2 UpperCamelCase = jnp.floataa @nn.compact def __call__( self :Any , __UpperCamelCase :Any ): A = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_1" )(__UpperCamelCase ) A = nn.silu(__UpperCamelCase ) A = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_2" )(__UpperCamelCase ) return temb class _UpperCAmelCase ( nn.Module ): UpperCamelCase = 3_2 UpperCamelCase = False UpperCamelCase = 1 @nn.compact def __call__( self :List[Any] , __UpperCamelCase :Union[str, Any] ): return get_sinusoidal_embeddings( __UpperCamelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
292
"""simple docstring""" from math import isqrt, loga def A__ ( UpperCamelCase ): A = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , UpperCamelCase , UpperCamelCase ): A = False return [i for i in range(2 , UpperCamelCase ) if is_prime[i]] def A__ ( UpperCamelCase = 800_800 , UpperCamelCase = 800_800 ): A = degree * loga(UpperCamelCase ) A = int(UpperCamelCase ) A = calculate_prime_numbers(UpperCamelCase ) A = 0 A = 0 A = len(UpperCamelCase ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(F"""{solution() = }""")
292
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _snake_case : Tuple = logging.get_logger(__name__) _snake_case : Optional[Any] = { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json', # See all ConvBERT models at https://huggingface.co/models?filter=convbert } class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = '''convbert''' def __init__( self :Union[str, Any] , __UpperCamelCase :List[str]=3_05_22 , __UpperCamelCase :Optional[Any]=7_68 , __UpperCamelCase :Optional[int]=12 , __UpperCamelCase :Union[str, Any]=12 , __UpperCamelCase :int=30_72 , __UpperCamelCase :Optional[Any]="gelu" , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Tuple=5_12 , __UpperCamelCase :List[Any]=2 , __UpperCamelCase :Union[str, Any]=0.02 , __UpperCamelCase :Any=1e-12 , __UpperCamelCase :str=1 , __UpperCamelCase :Union[str, Any]=0 , __UpperCamelCase :Union[str, Any]=2 , __UpperCamelCase :str=7_68 , __UpperCamelCase :Tuple=2 , __UpperCamelCase :Union[str, Any]=9 , __UpperCamelCase :str=1 , __UpperCamelCase :int=None , **__UpperCamelCase :int , ): super().__init__( pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase , ) A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = initializer_range A = layer_norm_eps A = embedding_size A = head_ratio A = conv_kernel_size A = num_groups A = classifier_dropout class 
_UpperCAmelCase ( lowercase_ ): @property def lowerCamelCase ( self :Dict ): if self.task == "multiple-choice": A = {0: "batch", 1: "choice", 2: "sequence"} else: A = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis), ] )
292
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _snake_case : Union[str, Any] = { 'configuration_encodec': [ 'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EncodecConfig', ], 'feature_extraction_encodec': ['EncodecFeatureExtractor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : int = [ 'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST', 'EncodecModel', 'EncodecPreTrainedModel', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys _snake_case : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
292
1
"""simple docstring""" import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class _UpperCAmelCase : def __init__( self :Optional[int] , __UpperCamelCase :List[Any] , __UpperCamelCase :List[Any]=13 , __UpperCamelCase :int=64 , __UpperCamelCase :Optional[int]=2 , __UpperCamelCase :str=3 , __UpperCamelCase :Any=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :Dict=32 , __UpperCamelCase :List[Any]=5 , __UpperCamelCase :str=4 , __UpperCamelCase :str=37 , __UpperCamelCase :Optional[int]="gelu" , __UpperCamelCase :List[str]=0.1 , __UpperCamelCase :Any=0.1 , __UpperCamelCase :Tuple=10 , __UpperCamelCase :Dict=0.02 , __UpperCamelCase :List[Any]=[1, 16, 4, 4] , __UpperCamelCase :List[str]=None , ): A = parent A = batch_size A = image_size A = patch_size A = num_channels A = is_training A = use_labels A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = type_sequence_label_size A = initializer_range A = scope A = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # 
of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size A = (self.image_size // 32) ** 2 A = num_patches + 1 def lowerCamelCase ( self :Optional[int] ): A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A = None if self.use_labels: A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self :List[str] ): A = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, "hidden_sizes": [4, 8, 16, 32], "num_groups": 2, } return ViTHybridConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=__UpperCamelCase , ) def lowerCamelCase ( self :Tuple , __UpperCamelCase :Optional[int] , __UpperCamelCase :int , __UpperCamelCase :Union[str, Any] ): A = ViTHybridModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Tuple , __UpperCamelCase :Optional[Any] , __UpperCamelCase :List[Any] ): A = self.type_sequence_label_size A = ViTHybridForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = model(__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape 
, (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase ( self :str ): A = self.prepare_config_and_inputs() A, A, A = config_and_inputs A = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): UpperCamelCase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () UpperCamelCase = ( {'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification} if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def lowerCamelCase ( self :Dict ): A = ViTHybridModelTester(self ) A = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 ) def lowerCamelCase ( self :Any ): self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds" ) def lowerCamelCase ( self :List[str] ): pass def lowerCamelCase ( self :Dict ): A, A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(__UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) ) def lowerCamelCase ( self :Any ): A, A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(__UpperCamelCase ) A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A = [*signature.parameters.keys()] A = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def lowerCamelCase ( self :Dict ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def lowerCamelCase ( self :Union[str, Any] ): A = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase ) def lowerCamelCase ( self :List[str] ): A, A = self.model_tester.prepare_config_and_inputs_for_common() A = _config_zero_init(__UpperCamelCase ) for model_class in self.all_model_classes: A = model_class(config=__UpperCamelCase ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": A = [f"{name}.{key}" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , ) @slow def lowerCamelCase ( self :Tuple ): for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = ViTHybridModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def A__ ( ): A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _UpperCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase ( self :Optional[int] ): return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase ( self :Optional[Any] ): A = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( __UpperCamelCase ) A = self.default_image_processor A = prepare_img() A = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): A = model(**__UpperCamelCase ) # verify the logits A = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __UpperCamelCase ) A = torch.tensor([-1.9_090, -0.4_993, -0.2_389] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) ) @slow 
@require_accelerate def lowerCamelCase ( self :Dict ): A = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384" ) A = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384" , device_map="auto" ) A = prepare_img() A = image_processor(images=__UpperCamelCase , return_tensors="pt" ) A = model(**__UpperCamelCase ) A = outputs.logits # model predicts one of the 1000 ImageNet classes A = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx] , "tabby, tabby cat" )
292
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging _snake_case : List[Any] = logging.get_logger(__name__) _snake_case : int = { 'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json', # See all Marian models at https://huggingface.co/models?filter=marian } class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = '''marian''' UpperCamelCase = ['''past_key_values'''] UpperCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self :int , __UpperCamelCase :Any=5_81_01 , __UpperCamelCase :int=None , __UpperCamelCase :Union[str, Any]=10_24 , __UpperCamelCase :Union[str, Any]=12 , __UpperCamelCase :str=40_96 , __UpperCamelCase :int=16 , __UpperCamelCase :int=12 , __UpperCamelCase :Optional[Any]=40_96 , __UpperCamelCase :Optional[Any]=16 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :str=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :Any="gelu" , __UpperCamelCase :Any=10_24 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Optional[Any]=0.0 , __UpperCamelCase :Union[str, Any]=0.0 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :List[str]=5_81_00 , __UpperCamelCase :str=False , __UpperCamelCase :Optional[int]=5_81_00 , __UpperCamelCase :List[Any]=0 , __UpperCamelCase :List[str]=0 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ): A = vocab_size A = decoder_vocab_size or vocab_size A = max_position_embeddings A = d_model A = encoder_ffn_dim A = encoder_layers A = encoder_attention_heads A = decoder_ffn_dim A = decoder_layers A = decoder_attention_heads A = dropout A = 
attention_dropout A = activation_dropout A = activation_function A = init_std A = encoder_layerdrop A = decoder_layerdrop A = use_cache A = encoder_layers A = scale_embedding # scale factor will be sqrt(d_model) if True A = share_encoder_decoder_embeddings super().__init__( pad_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , forced_eos_token_id=__UpperCamelCase , **__UpperCamelCase , ) class _UpperCAmelCase ( lowercase_ ): @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def lowerCamelCase ( self :List[str] ): if self.task in ["default", "seq2seq-lm"]: A = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: A = {0: "batch"} A = {0: "batch", 1: "past_decoder_sequence + sequence"} else: A = {0: "batch", 1: "decoder_sequence"} A = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(__UpperCamelCase , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
A = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: A, A = self.num_layers for i in range(__UpperCamelCase ): A = {0: "batch", 2: "past_sequence + sequence"} A = {0: "batch", 2: "past_sequence + sequence"} else: A = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def lowerCamelCase ( self :List[str] ): if self.task in ["default", "seq2seq-lm"]: A = super().outputs else: A = super(__UpperCamelCase , self ).outputs if self.use_past: A, A = self.num_layers for i in range(__UpperCamelCase ): A = {0: "batch", 2: "past_sequence + sequence"} A = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ): A = self._generate_dummy_inputs_for_encoder_and_decoder( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # Generate decoder inputs A = seq_length if not self.use_past else 1 A = self._generate_dummy_inputs_for_encoder_and_decoder( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) A = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} A = dict(**__UpperCamelCase , **__UpperCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch A, A = common_inputs["input_ids"].shape A = common_inputs["decoder_input_ids"].shape[1] A, A = self.num_attention_heads A = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) A = decoder_seq_length + 3 A = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) A = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase )] , dim=1 ) A = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered A, A = self.num_layers A = min(__UpperCamelCase , __UpperCamelCase ) A = max(__UpperCamelCase , __UpperCamelCase ) - min_num_layers A = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(__UpperCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase ), ) ) # TODO: test this. A = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(__UpperCamelCase , __UpperCamelCase ): common_inputs["past_key_values"].append((torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) ) return common_inputs def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ): A = self._generate_dummy_inputs_for_encoder_and_decoder( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch A, A = common_inputs["input_ids"].shape # Not using the same length for past_key_values A = seqlen + 2 A, A = self.num_layers A, A = self.num_attention_heads A = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) A = common_inputs["attention_mask"].dtype A = torch.cat( [common_inputs["attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase , dtype=__UpperCamelCase )] , dim=1 ) A = [ (torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) for _ in range(__UpperCamelCase ) ] return common_inputs def lowerCamelCase ( self :Tuple , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX A = compute_effective_axis_dimension( __UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX A = tokenizer.num_special_tokens_to_add(__UpperCamelCase ) A = compute_effective_axis_dimension( __UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCamelCase ) # Generate dummy inputs according to compute batch and sequence A = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size A = dict(tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase ) ) return common_inputs def lowerCamelCase ( self :List[Any] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ): if self.task in ["default", "seq2seq-lm"]: A = 
self._generate_dummy_inputs_for_default_and_seqaseq_lm( __UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase ) else: A = self._generate_dummy_inputs_for_causal_lm( __UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase ) return common_inputs def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str] , __UpperCamelCase :str , __UpperCamelCase :str ): if self.task in ["default", "seq2seq-lm"]: A = super()._flatten_past_key_values_(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) else: A = super(__UpperCamelCase , self )._flatten_past_key_values_( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) @property def lowerCamelCase ( self :List[str] ): return 1e-4
292
1
"""simple docstring""" import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class _UpperCAmelCase : def __init__( self :Optional[int] , __UpperCamelCase :Tuple=2 , __UpperCamelCase :List[Any]=3 , __UpperCamelCase :Any=64 , __UpperCamelCase :Tuple=None ): A = np.random.default_rng(__UpperCamelCase ) A = length A = rng.normal(size=(length,) ).astype(np.floataa ) A = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self :Tuple ): return self.length def __getitem__( self :str , __UpperCamelCase :Optional[int] ): return {"x": self.x[i], "y": self.y[i]} class _UpperCAmelCase ( torch.nn.Module ): def __init__( self :Tuple , __UpperCamelCase :List[str]=0 , __UpperCamelCase :Optional[int]=0 , __UpperCamelCase :Optional[int]=False ): super().__init__() A = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) A = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) A = True def lowerCamelCase ( self :Dict , __UpperCamelCase :Optional[int]=None ): if self.first_batch: print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" ) A = False return x * self.a[0] + self.b[0] class _UpperCAmelCase ( torch.nn.Module ): def __init__( self :str , __UpperCamelCase :List[str]=0 , __UpperCamelCase :List[str]=0 , __UpperCamelCase :str=False ): super().__init__() A = torch.nn.Parameter(torch.tensor(__UpperCamelCase ).float() ) A = torch.nn.Parameter(torch.tensor(__UpperCamelCase ).float() ) A = True def lowerCamelCase ( self :int , __UpperCamelCase :Union[str, Any]=None ): if self.first_batch: print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. 
Input dtype: {x.dtype}" ) A = False return x * self.a + self.b def A__ ( UpperCamelCase , UpperCamelCase = 16 ): from datasets import load_dataset from transformers import AutoTokenizer A = AutoTokenizer.from_pretrained("bert-base-cased" ) A = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"} A = load_dataset("csv" , data_files=UpperCamelCase ) A = datasets["train"].unique("label" ) A = {v: i for i, v in enumerate(UpperCamelCase )} def tokenize_function(UpperCamelCase ): # max_length=None => use the model max length (it's actually the default) A = tokenizer( examples["sentence1"] , examples["sentence2"] , truncation=UpperCamelCase , max_length=UpperCamelCase , padding="max_length" ) if "label" in examples: A = [label_to_id[l] for l in examples["label"]] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset A = datasets.map( UpperCamelCase , batched=UpperCamelCase , remove_columns=["sentence1", "sentence2", "label"] , ) def collate_fn(UpperCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(UpperCamelCase , padding="max_length" , max_length=128 , return_tensors="pt" ) return tokenizer.pad(UpperCamelCase , padding="longest" , return_tensors="pt" ) # Instantiate dataloaders. A = DataLoader(tokenized_datasets["train"] , shuffle=UpperCamelCase , collate_fn=UpperCamelCase , batch_size=2 ) A = DataLoader(tokenized_datasets["validation"] , shuffle=UpperCamelCase , collate_fn=UpperCamelCase , batch_size=1 ) return train_dataloader, eval_dataloader
292
"""simple docstring""" # A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. def A__ ( UpperCamelCase ): A = [False] * len(UpperCamelCase ) A = [-1] * len(UpperCamelCase ) def dfs(UpperCamelCase , UpperCamelCase ): A = True A = c for u in graph[v]: if not visited[u]: dfs(UpperCamelCase , 1 - c ) for i in range(len(UpperCamelCase ) ): if not visited[i]: dfs(UpperCamelCase , 0 ) for i in range(len(UpperCamelCase ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph _snake_case : str = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
292
1
"""simple docstring""" import requests from bsa import BeautifulSoup def A__ ( UpperCamelCase = "https://www.worldometers.info/coronavirus" ): A = BeautifulSoup(requests.get(UpperCamelCase ).text , "html.parser" ) A = soup.findAll("h1" ) A = soup.findAll("div" , {"class": "maincounter-number"} ) keys += soup.findAll("span" , {"class": "panel-title"} ) values += soup.findAll("div" , {"class": "number-table-main"} ) return {key.text.strip(): value.text.strip() for key, value in zip(UpperCamelCase , UpperCamelCase )} if __name__ == "__main__": print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n') for key, value in world_covidaa_stats().items(): print(F"""{key}\n{value}\n""")
292
"""simple docstring""" from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class _UpperCAmelCase ( lowercase_ ): def __init__( self :int , __UpperCamelCase :Distribution , __UpperCamelCase :Dict=None , __UpperCamelCase :Optional[int]=None , __UpperCamelCase :List[str]=0 ): A = 1.0 if scale is None else scale A = 0.0 if loc is None else loc super().__init__(__UpperCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__UpperCamelCase )] ) @property def lowerCamelCase ( self :Any ): return self.base_dist.mean * self.scale + self.loc @property def lowerCamelCase ( self :Optional[int] ): return self.base_dist.variance * self.scale**2 @property def lowerCamelCase ( self :Dict ): return self.variance.sqrt() class _UpperCAmelCase ( nn.Module ): def __init__( self :Dict , __UpperCamelCase :int , __UpperCamelCase :Dict[str, int] , __UpperCamelCase :Callable[..., Tuple[torch.Tensor]] , **__UpperCamelCase :str ): super().__init__(**__UpperCamelCase ) A = args_dim A = nn.ModuleList([nn.Linear(__UpperCamelCase , __UpperCamelCase ) for dim in args_dim.values()] ) A = domain_map def lowerCamelCase ( self :int , __UpperCamelCase :torch.Tensor ): A = [proj(__UpperCamelCase ) for proj in self.proj] return self.domain_map(*__UpperCamelCase ) class _UpperCAmelCase ( nn.Module ): def __init__( self :Dict , __UpperCamelCase :int ): super().__init__() A = function def lowerCamelCase ( self :List[str] , __UpperCamelCase :Any , *__UpperCamelCase :Any ): return self.function(__UpperCamelCase , *__UpperCamelCase ) class _UpperCAmelCase : UpperCamelCase = 42 UpperCamelCase = 42 UpperCamelCase = 42 def __init__( self :Any , __UpperCamelCase :int = 1 ): A = dim A = {k: dim * self.args_dim[k] for k in self.args_dim} def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Dict ): if self.dim == 1: return 
self.distribution_class(*__UpperCamelCase ) else: return Independent(self.distribution_class(*__UpperCamelCase ) , 1 ) def lowerCamelCase ( self :int , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None , ): A = self._base_distribution(__UpperCamelCase ) if loc is None and scale is None: return distr else: return AffineTransformed(__UpperCamelCase , loc=__UpperCamelCase , scale=__UpperCamelCase , event_dim=self.event_dim ) @property def lowerCamelCase ( self :List[Any] ): return () if self.dim == 1 else (self.dim,) @property def lowerCamelCase ( self :Tuple ): return len(self.event_shape ) @property def lowerCamelCase ( self :int ): return 0.0 def lowerCamelCase ( self :str , __UpperCamelCase :int ): return ParameterProjection( in_features=__UpperCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def lowerCamelCase ( self :List[Any] , *__UpperCamelCase :torch.Tensor ): raise NotImplementedError() @staticmethod def lowerCamelCase ( __UpperCamelCase :torch.Tensor ): return (x + torch.sqrt(torch.square(__UpperCamelCase ) + 4.0 )) / 2.0 class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = {"df": 1, "loc": 1, "scale": 1} UpperCamelCase = StudentT @classmethod def lowerCamelCase ( cls :List[str] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ): A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) A = 2.0 + cls.squareplus(__UpperCamelCase ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = {"loc": 1, "scale": 1} UpperCamelCase = Normal @classmethod def lowerCamelCase ( cls :List[Any] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ): A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class _UpperCAmelCase ( lowercase_ ): 
UpperCamelCase = {"total_count": 1, "logits": 1} UpperCamelCase = NegativeBinomial @classmethod def lowerCamelCase ( cls :str , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ): A = cls.squareplus(__UpperCamelCase ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def lowerCamelCase ( self :Tuple , __UpperCamelCase :List[str] ): A, A = distr_args if self.dim == 1: return self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase ) else: return Independent(self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase ) , 1 ) def lowerCamelCase ( self :List[str] , __UpperCamelCase :str , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None ): A, A = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
292
1
"""simple docstring""" _snake_case : Any = { 'A': ['B', 'C', 'E'], 'B': ['A', 'D', 'E'], 'C': ['A', 'F', 'G'], 'D': ['B'], 'E': ['A', 'B', 'D'], 'F': ['C'], 'G': ['C'], } def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): A = set() # keep track of all the paths to be checked A = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue A = queue.pop(0 ) # get the last node from the path A = path[-1] if node not in explored: A = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: A = list(UpperCamelCase ) new_path.append(UpperCamelCase ) queue.append(UpperCamelCase ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(UpperCamelCase ) # in case there's no path between the 2 nodes return [] def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 A = [start] A = set(UpperCamelCase ) # Keep tab on distances from `start` node. A = {start: 0, target: -1} while queue: A = queue.pop(0 ) if node == target: A = ( dist[node] if dist[target] == -1 else min(dist[target] , dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(UpperCamelCase ) queue.append(UpperCamelCase ) A = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
292
"""simple docstring""" import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class _UpperCAmelCase : UpperCamelCase = None def lowerCamelCase ( self :List[Any] ): A = self.feature_extraction_class(**self.feat_extract_dict ) A = json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , __UpperCamelCase ) def lowerCamelCase ( self :Dict ): A = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A = os.path.join(__UpperCamelCase , "feat_extract.json" ) feat_extract_first.to_json_file(__UpperCamelCase ) A = self.feature_extraction_class.from_json_file(__UpperCamelCase ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def lowerCamelCase ( self :Dict ): A = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A = feat_extract_first.save_pretrained(__UpperCamelCase )[0] check_json_file_has_correct_format(__UpperCamelCase ) A = self.feature_extraction_class.from_pretrained(__UpperCamelCase ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def lowerCamelCase ( self :Tuple ): A = self.feature_extraction_class() self.assertIsNotNone(__UpperCamelCase )
292
1
"""simple docstring""" from __future__ import annotations # This is the precision for this function which can be altered. # It is recommended for users to keep this number greater than or equal to 10. _snake_case : Tuple = 10 def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): for i in range(UpperCamelCase , UpperCamelCase ): if array[i] == target: return i return -1 def A__ ( UpperCamelCase , UpperCamelCase ): A = 0 A = len(UpperCamelCase ) while left <= right: if right - left < precision: return lin_search(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) A = (left + right) // 3 + 1 A = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: A = one_third - 1 elif array[two_third] < target: A = two_third + 1 else: A = one_third + 1 A = two_third - 1 else: return -1 def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): if left < right: if right - left < precision: return lin_search(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) A = (left + right) // 3 + 1 A = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: return rec_ternary_search(UpperCamelCase , one_third - 1 , UpperCamelCase , UpperCamelCase ) elif array[two_third] < target: return rec_ternary_search(two_third + 1 , UpperCamelCase , UpperCamelCase , UpperCamelCase ) else: return rec_ternary_search(one_third + 1 , two_third - 1 , UpperCamelCase , UpperCamelCase ) else: return -1 if __name__ == "__main__": import doctest doctest.testmod() _snake_case : Optional[Any] = input('Enter numbers separated by comma:\n').strip() _snake_case : Optional[Any] = [int(item.strip()) for item in user_input.split(',')] assert collection == sorted(collection), F"List must be ordered.\n{collection}." 
_snake_case : Optional[Any] = int(input('Enter the number to be found in the list:\n').strip()) _snake_case : str = ite_ternary_search(collection, target) _snake_case : List[Any] = rec_ternary_search(0, len(collection) - 1, collection, target) if resulta != -1: print(F"""Iterative search: {target} found at positions: {resulta}""") print(F"""Recursive search: {target} found at positions: {resulta}""") else: print('Not found')
292
"""simple docstring""" import unittest from transformers import RoFormerTokenizer, RoFormerTokenizerFast from transformers.testing_utils import require_rjieba, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_rjieba @require_tokenizers class _UpperCAmelCase ( lowercase_ , unittest.TestCase ): UpperCamelCase = RoFormerTokenizer UpperCamelCase = RoFormerTokenizerFast UpperCamelCase = True UpperCamelCase = True def lowerCamelCase ( self :List[str] ): super().setUp() def lowerCamelCase ( self :int , **__UpperCamelCase :List[Any] ): return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCamelCase ) def lowerCamelCase ( self :Tuple , **__UpperCamelCase :Optional[int] ): return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCamelCase ) def lowerCamelCase ( self :Any ): A = "永和服装饰品有限公司,今天天气非常好" A = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好" return input_text, output_text def lowerCamelCase ( self :int ): A = self.get_tokenizer() A, A = self.get_chinese_input_output_texts() A = tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , output_text.split() ) A = tokens + [tokenizer.unk_token] A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase ) def lowerCamelCase ( self :str ): A = self.get_rust_tokenizer() A, A = self.get_chinese_input_output_texts() A = tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , output_text.split() ) A = tokens + [tokenizer.unk_token] A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase ) def lowerCamelCase ( self :Any ): pass def lowerCamelCase ( self :Tuple ): pass def lowerCamelCase ( self :List[str] ): pass
292
1
"""simple docstring""" import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) _snake_case : Union[str, Any] = getLogger(__name__) def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = 8 , UpperCamelCase = 1_024 , UpperCamelCase="val" , UpperCamelCase=None , UpperCamelCase=False , UpperCamelCase="summarization" , UpperCamelCase=None , UpperCamelCase=1 , UpperCamelCase = None , UpperCamelCase="" , **UpperCamelCase , ): A = str(UpperCamelCase ) assert local_rank is not None torch.distributed.init_process_group(backend="nccl" , rank=UpperCamelCase ) A = Path(UpperCamelCase ) A = save_dir.joinpath(F"rank_{local_rank}_output.json" ) torch.cuda.set_device(UpperCamelCase ) A = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ).cuda() if fpaa: A = model.half() # determine if we need to increase num_beams use_task_specific_params(UpperCamelCase , UpperCamelCase ) # update config with task specific params A = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: A = num_return_sequences A = AutoTokenizer.from_pretrained(UpperCamelCase ) logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. 
if max_source_length is None: A = tokenizer.model_max_length if prefix is None: A = prefix or getattr(model.config , "prefix" , "" ) or "" A = SeqaSeqDataset( UpperCamelCase , UpperCamelCase , UpperCamelCase , max_target_length=1_024 , type_path=UpperCamelCase , n_obs=UpperCamelCase , prefix=UpperCamelCase , **UpperCamelCase , ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. A = ds.make_sortish_sampler(UpperCamelCase , distributed=UpperCamelCase , add_extra_examples=UpperCamelCase , shuffle=UpperCamelCase ) A = DataLoader(UpperCamelCase , sampler=UpperCamelCase , batch_size=UpperCamelCase , collate_fn=ds.collate_fn ) A = [] for batch in tqdm(UpperCamelCase ): A = model.generate( input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=UpperCamelCase , num_beams=UpperCamelCase , **UpperCamelCase , ) A = tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase , clean_up_tokenization_spaces=UpperCamelCase ) A = batch["ids"] if num_return_sequences > 1: A = chunks(UpperCamelCase , UpperCamelCase ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(UpperCamelCase ): results.append({"pred": pred, "id": ids[i].item()} ) save_json(UpperCamelCase , UpperCamelCase ) return results, sampler.num_replicas def A__ ( ): A = argparse.ArgumentParser( epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" ) parser.add_argument("--data_dir" , type=UpperCamelCase , help="like cnn_dm/test.source" ) parser.add_argument( "--model_name" , type=UpperCamelCase , help="like facebook/bart-large-cnn,t5-base, etc." 
, default="sshleifer/distilbart-xsum-12-3" , ) parser.add_argument("--save_dir" , type=UpperCamelCase , help="where to save" , default="tmp_gen" ) parser.add_argument("--max_source_length" , type=UpperCamelCase , default=UpperCamelCase ) parser.add_argument( "--type_path" , type=UpperCamelCase , default="test" , help="which subset to evaluate typically train/val/test" ) parser.add_argument("--task" , type=UpperCamelCase , default="summarization" , help="used for task_specific_params + metrics" ) parser.add_argument("--bs" , type=UpperCamelCase , default=8 , required=UpperCamelCase , help="batch size" ) parser.add_argument( "--local_rank" , type=UpperCamelCase , default=-1 , required=UpperCamelCase , help="should be passed by distributed.launch" ) parser.add_argument( "--n_obs" , type=UpperCamelCase , default=UpperCamelCase , required=UpperCamelCase , help="How many observations. Defaults to all." ) parser.add_argument( "--num_return_sequences" , type=UpperCamelCase , default=1 , required=UpperCamelCase , help="How many sequences to return" ) parser.add_argument( "--sync_timeout" , type=UpperCamelCase , default=600 , required=UpperCamelCase , help="How long should master process wait for other processes to finish." 
, ) parser.add_argument("--src_lang" , type=UpperCamelCase , default=UpperCamelCase , required=UpperCamelCase ) parser.add_argument("--tgt_lang" , type=UpperCamelCase , default=UpperCamelCase , required=UpperCamelCase ) parser.add_argument( "--prefix" , type=UpperCamelCase , required=UpperCamelCase , default=UpperCamelCase , help="will be added to the begininng of src examples" ) parser.add_argument("--fp16" , action="store_true" ) parser.add_argument("--debug" , action="store_true" ) A = time.time() A, A = parser.parse_known_args() A = parse_numeric_n_bool_cl_kwargs(UpperCamelCase ) if generate_kwargs and args.local_rank <= 0: print(F"parsed the following generate kwargs: {generate_kwargs}" ) A = Path(args.save_dir + "_tmp" ) Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase ) # this handles locking. A = list(json_save_dir.glob("rank_*.json" ) ) if intermediate_files: raise ValueError(F"Found files at {json_save_dir} please move or remove them." ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
# NOTE(review): this chunk is whitespace-mangled (many statements per physical line) and the first
# statements above belong to an eval_data_dir(...) definition whose `def` line is outside this chunk;
# they are preserved byte-for-byte rather than reconstructed. The code below builds `dataset_kwargs`
# from --src_lang/--tgt_lang, runs eval_data_dir, then (rank 0 only) gathers per-rank JSON results,
# scores them with BLEU or ROUGE depending on whether "translation" is in --task, and writes metrics.
A = {} if args.src_lang is not None: A = args.src_lang if args.tgt_lang is not None: A = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=UpperCamelCase ) A, A = eval_data_dir( args.data_dir , UpperCamelCase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=UpperCamelCase , **UpperCamelCase , ) if args.local_rank <= 0: A = Path(args.save_dir ) save_dir.mkdir(exist_ok=UpperCamelCase ) A = gather_results_from_each_node(UpperCamelCase , UpperCamelCase , args.sync_timeout ) A = combine_partial_results(UpperCamelCase ) if args.num_return_sequences > 1: A = save_dir.joinpath("pseudolabel_results.json" ) print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" ) save_json(UpperCamelCase , UpperCamelCase ) return A = Path(args.data_dir ).joinpath(args.type_path + ".target" ) with open(UpperCamelCase ) as f: A = [x.rstrip() for x in f.readlines()][: len(UpperCamelCase )] # Calculate metrics, save metrics, and save _generations.txt A = "translation" in args.task A = calculate_bleu if calc_bleu else calculate_rouge A = "bleu" if calc_bleu else "rouge" A = score_fn(UpperCamelCase , UpperCamelCase ) A = len(UpperCamelCase ) A = time.time() - start_time A = round(runtime / metrics["n_obs"] , 4 ) A = num_replicas # TODO(@stas00): add whatever metadata to metrics A = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" ) save_json(UpperCamelCase , UpperCamelCase , indent=UpperCamelCase ) print(UpperCamelCase ) write_txt_file(UpperCamelCase , save_dir.joinpath(F"{args.type_path}_generations.txt" ) ) if args.debug: write_txt_file(UpperCamelCase , save_dir.joinpath(F"{args.type_path}.target" ) ) else: shutil.rmtree(UpperCamelCase ) def A__ ( UpperCamelCase ): A = [] for partial_result in partial_results: records.extend(UpperCamelCase )
# combine_partial_results (above): flattens the per-rank record lists; then (below) sorts the merged
# records by example id and extracts the predictions. gather_results_from_each_node polls the save dir
# until `num_replicas` rank_*.json files exist and parse cleanly, or raises TimeoutError.
A = sorted(UpperCamelCase , key=lambda UpperCamelCase : x["id"] ) A = [x["pred"] for x in records] return preds def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): # WAIT FOR lots of .json files A = time.time() logger.info("waiting for all nodes to finish" ) A = None while (time.time() - start_wait) < timeout: A = list(save_dir.glob("rank_*.json" ) ) if len(UpperCamelCase ) < num_replicas: continue try: # make sure all json files are fully saved A = lmap(UpperCamelCase , UpperCamelCase ) return json_data except JSONDecodeError: continue else: raise TimeoutError("Rank 0 gave up on waiting for other processes" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
# NOTE(review): the __main__ guard above ends in a comment -- the run_generate() call appears to have
# been folded into the "# Usage for MT:" comment by the same mangling; confirm against the upstream script.
292
"""simple docstring"""


def A__(input_str, use_pascal=False):
    """Convert a snake_case string to camelCase (or PascalCase).

    Args:
        input_str: the snake_case string to convert.
        use_pascal: when True, capitalize the first word as well
            (PascalCase); otherwise the first word is kept lowercase.

    Returns:
        The converted string.

    Raises:
        ValueError: if ``input_str`` is not a str or ``use_pascal`` is
            not a bool.

    >>> A__("some_random_string")
    'someRandomString'
    >>> A__("some_random_string", use_pascal=True)
    'SomeRandomString'
    """
    # The original definition declared two parameters both named
    # `UpperCamelCase` (a SyntaxError) while the body referenced
    # `input_str`; the real parameter names are restored here.
    if not isinstance(input_str, str):
        A = f"Expected string as input, found {type(input_str)}"
        raise ValueError(A)
    if not isinstance(use_pascal, bool):
        A = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(A)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    # Skip empty segments (e.g. from "ab__cd" or a trailing underscore)
    # so word[0] never raises IndexError.
    capitalized_words = [word[0].upper() + word[1:] for word in words[start_index:] if word]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
292
1
"""simple docstring"""
import re


def A__(phone):
    """Return True if *phone* is a valid Sri Lankan mobile phone number.

    Accepted forms: a prefix of ``0``, ``94``, ``+94`` or ``0094``,
    followed by a mobile operator code ``7[012456-8]``, an optional
    separator (hyphen or space), and seven digits.
    """
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)"
        r"7(0|1|2|4|5|6|7|8)"
        r"(-| |)"
        r"\d{7}$"
    )
    # The original bound the compiled pattern to a variable it never used and
    # called re.search(phone, phone), matching the input against itself.
    return bool(pattern.search(phone))


if __name__ == "__main__":
    _snake_case: str = "0094702343221"
    # The original demo referenced undefined names (`is_sri_lankan_phone_number`,
    # `phone`); it is wired to the actual function and constant here.
    print(A__(_snake_case))
292
# NOTE(review): Kandinsky 2.2 controlnet text-to-image pipeline module: imports, a long usage-example
# string, a latent height/width helper, and the pipeline class (init, prepare_latents, CPU offload
# helpers, _execution_device, __call__). This region is whitespace-mangled -- in particular the long
# single-quoted example string is physically broken across line boundaries -- so the content is
# preserved byte-for-byte; no restyle is safe here.
"""simple docstring""" from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) _snake_case : int = logging.get_logger(__name__) # pylint: disable=invalid-name _snake_case : List[Any] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... 
).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... 
).images\n\n >>> images[0].save("robot_cat.png")\n ```\n' def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase=8 ): A = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 A = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class _UpperCAmelCase ( lowercase_ ): def __init__( self :Any , __UpperCamelCase :UNetaDConditionModel , __UpperCamelCase :DDPMScheduler , __UpperCamelCase :VQModel , ): super().__init__() self.register_modules( unet=__UpperCamelCase , scheduler=__UpperCamelCase , movq=__UpperCamelCase , ) A = 2 ** (len(self.movq.config.block_out_channels ) - 1) def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Tuple , __UpperCamelCase :Dict , __UpperCamelCase :Dict , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] , __UpperCamelCase :List[str] ): if latents is None: A = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=__UpperCamelCase , dtype=__UpperCamelCase ) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" ) A = latents.to(__UpperCamelCase ) A = latents * scheduler.init_noise_sigma return latents def lowerCamelCase ( self :Tuple , __UpperCamelCase :Any=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) A = torch.device(f"cuda:{gpu_id}" ) A = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(__UpperCamelCase , __UpperCamelCase ) def lowerCamelCase ( self :Dict , __UpperCamelCase :int=0 ): if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." 
) A = torch.device(f"cuda:{gpu_id}" ) if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=__UpperCamelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) A = None for cpu_offloaded_model in [self.unet, self.movq]: A, A = cpu_offload_with_hook(__UpperCamelCase , __UpperCamelCase , prev_module_hook=__UpperCamelCase ) # We'll offload the last model manually. A = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def lowerCamelCase ( self :str ): if not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(__UpperCamelCase , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(__UpperCamelCase ) def __call__( self :List[Any] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :torch.FloatTensor , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 1_00 , __UpperCamelCase :float = 4.0 , __UpperCamelCase :int = 1 , __UpperCamelCase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , ): A = self._execution_device A = guidance_scale > 1.0 if isinstance(__UpperCamelCase , __UpperCamelCase ): A = torch.cat(__UpperCamelCase , dim=0 ) if isinstance(__UpperCamelCase , __UpperCamelCase ): A = torch.cat(__UpperCamelCase , dim=0 ) if isinstance(__UpperCamelCase , __UpperCamelCase ): A = torch.cat(__UpperCamelCase , dim=0 ) A = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: A = 
image_embeds.repeat_interleave(__UpperCamelCase , dim=0 ) A = negative_image_embeds.repeat_interleave(__UpperCamelCase , dim=0 ) A = hint.repeat_interleave(__UpperCamelCase , dim=0 ) A = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase ) A = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase ) self.scheduler.set_timesteps(__UpperCamelCase , device=__UpperCamelCase ) A = self.scheduler.timesteps A = self.movq.config.latent_channels A, A = downscale_height_and_width(__UpperCamelCase , __UpperCamelCase , self.movq_scale_factor ) # create initial latent A = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , self.scheduler , ) for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents A = {"image_embeds": image_embeds, "hint": hint} A = self.unet( sample=__UpperCamelCase , timestep=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , added_cond_kwargs=__UpperCamelCase , return_dict=__UpperCamelCase , )[0] if do_classifier_free_guidance: A, A = noise_pred.split(latents.shape[1] , dim=1 ) A, A = noise_pred.chunk(2 ) A, A = variance_pred.chunk(2 ) A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) A = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , "variance_type" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): A, A = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 A = self.scheduler.step( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase , )[0] # post-processing A = self.movq.decode(__UpperCamelCase , force_not_quantize=__UpperCamelCase )["sample"] if 
output_type not in ["pt", "np", "pil"]: raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" ) if output_type in ["np", "pil"]: A = image * 0.5 + 0.5 A = image.clamp(0 , 1 ) A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": A = self.numpy_to_pil(__UpperCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__UpperCamelCase )
# NOTE(review): because every result above is bound to the throwaway name `A` (a renaming artifact),
# many later references (models, latents, noise_pred, image, ...) are undefined as written; restore
# the original variable names from the upstream diffusers source before relying on this module.
292
1
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Map of submodule name -> public names it exports; consumed lazily by
# `_LazyModule` below so that importing the package stays cheap. The
# original bound this dict to a throwaway name while `_LazyModule`
# received the undefined name `_import_structure` (NameError).
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: register the modeling classes too. The original
    # built this list but never attached it to the import structure, so
    # the model classes were unreachable via lazy import.
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy
    # module proxy below resolves them on first attribute access.
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )
else:
    import sys

    # Install the lazy proxy as this module. The original assigned the
    # proxy to a throwaway variable, so lazy loading never took effect.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
292
# NOTE(review): ViT-MSN model test module: a model-tester helper class, the ModelTesterMixin test
# class, and a slow integration test. This region is whitespace-mangled (many statements per physical
# line, with some physical line breaks falling mid-expression), so the content is preserved
# byte-for-byte; no restyle or reformat is safe here.
"""simple docstring""" import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _UpperCAmelCase : def __init__( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str]=13 , __UpperCamelCase :Any=30 , __UpperCamelCase :int=2 , __UpperCamelCase :Union[str, Any]=3 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :List[str]=32 , __UpperCamelCase :List[Any]=5 , __UpperCamelCase :Dict=4 , __UpperCamelCase :List[str]=37 , __UpperCamelCase :str="gelu" , __UpperCamelCase :Union[str, Any]=0.1 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Tuple=10 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :int=None , ): A = parent A = batch_size A = image_size A = patch_size A = num_channels A = is_training A = use_labels A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = type_sequence_label_size A = initializer_range A = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A = (image_size // patch_size) ** 2 A = num_patches + 1 def lowerCamelCase ( self :Any ): A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A = None if 
self.use_labels: A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self :Union[str, Any] ): return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def lowerCamelCase ( self :Dict , __UpperCamelCase :Dict , __UpperCamelCase :Any , __UpperCamelCase :Any ): A = ViTMSNModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Optional[Any] ): A = self.type_sequence_label_size A = ViTMSNForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = model(__UpperCamelCase , labels=__UpperCamelCase ) print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" ) print("Labels: {labels}" ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A = 1 A = ViTMSNForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase ( self :Optional[Any] ): A = self.prepare_config_and_inputs() A, A, A = config_and_inputs A = {"pixel_values": pixel_values} return config, inputs_dict 
# The ModelTesterMixin-based test class below wires the tester/config-tester, skips inputs_embeds
# (ViT models take pixel_values), and checks signatures, model outputs, and pretrained loading.
@require_torch class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () UpperCamelCase = ( {'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification} if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def lowerCamelCase ( self :Optional[int] ): A = ViTMSNModelTester(self ) A = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 ) def lowerCamelCase ( self :Any ): self.config_tester.run_common_tests() @unittest.skip(reason="ViTMSN does not use inputs_embeds" ) def lowerCamelCase ( self :Union[str, Any] ): pass def lowerCamelCase ( self :int ): A, A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(__UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) ) def lowerCamelCase ( self :Tuple ): A, A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(__UpperCamelCase ) A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A = [*signature.parameters.keys()] A = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def lowerCamelCase ( self :List[str] ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def lowerCamelCase ( self :Dict ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase ) @slow def lowerCamelCase ( self :List[Any] ): for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = 
ViTMSNModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def A__ ( ): A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _UpperCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase ( self :Union[str, Any] ): return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None @slow def lowerCamelCase ( self :Any ): torch.manual_seed(2 ) A = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(__UpperCamelCase ) A = self.default_image_processor A = prepare_img() A = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): A = model(**__UpperCamelCase ) # verify the logits A = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __UpperCamelCase ) A = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
# NOTE(review): as elsewhere in this file, results are bound to the throwaway name `A`, so later
# references (model, result, config_and_inputs, ...) are undefined as written -- a renaming artifact.
292
1
# NOTE(review): EfficientNet TF (Keras) -> HuggingFace conversion script: per-variant CONFIG_MAP,
# config/image-processor builders, TF->HF parameter-name mapping, weight copying with
# transposes/permutes, an output-equivalence check, and a CLI entry point. This region is
# whitespace-mangled with several physical line breaks falling mid-expression, so the content is
# preserved byte-for-byte; no restyle or reformat is safe here.
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case : str = logging.get_logger(__name__) _snake_case : Tuple = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } _snake_case : Union[str, Any] = { 'b0': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def A__ ( UpperCamelCase ): A = EfficientNetConfig() A = 
CONFIG_MAP[model_name]["hidden_dim"] A = CONFIG_MAP[model_name]["width_coef"] A = CONFIG_MAP[model_name]["depth_coef"] A = CONFIG_MAP[model_name]["image_size"] A = CONFIG_MAP[model_name]["dropout_rate"] A = CONFIG_MAP[model_name]["dw_padding"] A = "huggingface/label-files" A = "imagenet-1k-id2label.json" A = 1_000 A = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="dataset" ) , "r" ) ) A = {int(UpperCamelCase ): v for k, v in idalabel.items()} A = idalabel A = {v: k for k, v in idalabel.items()} return config def A__ ( ): A = "http://images.cocodataset.org/val2017/000000039769.jpg" A = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ) return im def A__ ( UpperCamelCase ): A = CONFIG_MAP[model_name]["image_size"] A = EfficientNetImageProcessor( size={"height": size, "width": size} , image_mean=[0.4_85, 0.4_56, 0.4_06] , image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] , do_center_crop=UpperCamelCase , ) return preprocessor def A__ ( UpperCamelCase ): A = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )] A = sorted(set(UpperCamelCase ) ) A = len(UpperCamelCase ) A = {b: str(UpperCamelCase ) for b, i in zip(UpperCamelCase , range(UpperCamelCase ) )} A = [] rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") ) rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") ) rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") ) rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") ) rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") ) for b in block_names: A = block_name_mapping[b] rename_keys.append((F"block{b}_expand_conv/kernel:0", F"encoder.blocks.{hf_b}.expansion.expand_conv.weight") ) rename_keys.append((F"block{b}_expand_bn/gamma:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.weight") ) rename_keys.append((F"block{b}_expand_bn/beta:0", 
F"encoder.blocks.{hf_b}.expansion.expand_bn.bias") ) rename_keys.append( (F"block{b}_expand_bn/moving_mean:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") ) rename_keys.append( (F"block{b}_expand_bn/moving_variance:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") ) rename_keys.append( (F"block{b}_dwconv/depthwise_kernel:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") ) rename_keys.append((F"block{b}_bn/gamma:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") ) rename_keys.append((F"block{b}_bn/beta:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") ) rename_keys.append( (F"block{b}_bn/moving_mean:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") ) rename_keys.append( (F"block{b}_bn/moving_variance:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") ) rename_keys.append((F"block{b}_se_reduce/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") ) rename_keys.append((F"block{b}_se_reduce/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") ) rename_keys.append((F"block{b}_se_expand/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") ) rename_keys.append((F"block{b}_se_expand/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") ) rename_keys.append( (F"block{b}_project_conv/kernel:0", F"encoder.blocks.{hf_b}.projection.project_conv.weight") ) rename_keys.append((F"block{b}_project_bn/gamma:0", F"encoder.blocks.{hf_b}.projection.project_bn.weight") ) rename_keys.append((F"block{b}_project_bn/beta:0", F"encoder.blocks.{hf_b}.projection.project_bn.bias") ) rename_keys.append( (F"block{b}_project_bn/moving_mean:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_mean") ) rename_keys.append( (F"block{b}_project_bn/moving_variance:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_var") ) rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") ) rename_keys.append(("top_bn/gamma:0", 
"encoder.top_bn.weight") ) rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") ) rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") ) rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") ) A = {} for item in rename_keys: if item[0] in original_param_names: A = "efficientnet." + item[1] A = "classifier.weight" A = "classifier.bias" return key_mapping def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): for key, value in tf_params.items(): if "normalization" in key: continue A = key_mapping[key] if "_conv" in key and "kernel" in key: A = torch.from_numpy(UpperCamelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: A = torch.from_numpy(UpperCamelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: A = torch.from_numpy(np.transpose(UpperCamelCase ) ) else: A = torch.from_numpy(UpperCamelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(UpperCamelCase ) @torch.no_grad() def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): A = model_classes[model_name]( include_top=UpperCamelCase , weights="imagenet" , input_tensor=UpperCamelCase , input_shape=UpperCamelCase , pooling=UpperCamelCase , classes=1_000 , classifier_activation="softmax" , ) A = original_model.trainable_variables A = original_model.non_trainable_variables A = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: A = param.numpy() A = list(tf_params.keys() ) # Load HuggingFace model A = get_efficientnet_config(UpperCamelCase ) A = EfficientNetForImageClassification(UpperCamelCase ).eval() A = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("Converting parameters..." 
) A = rename_keys(UpperCamelCase ) replace_params(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # Initialize preprocessor and preprocess input image A = convert_image_processor(UpperCamelCase ) A = preprocessor(images=prepare_img() , return_tensors="pt" ) # HF model inference hf_model.eval() with torch.no_grad(): A = hf_model(**UpperCamelCase ) A = outputs.logits.detach().numpy() # Original model inference A = False A = CONFIG_MAP[model_name]["image_size"] A = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) A = image.img_to_array(UpperCamelCase ) A = np.expand_dims(UpperCamelCase , axis=0 ) A = original_model.predict(UpperCamelCase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(UpperCamelCase , UpperCamelCase , atol=1E-3 ), "The predicted logits are not the same." print("Model outputs match!" ) if save_model: # Create folder to save model if not os.path.isdir(UpperCamelCase ): os.mkdir(UpperCamelCase ) # Save converted model and image processor hf_model.save_pretrained(UpperCamelCase ) preprocessor.save_pretrained(UpperCamelCase ) if push_to_hub: # Push model and image processor to hub print(F"Pushing converted {model_name} to the hub..." 
) A = F"efficientnet-{model_name}" preprocessor.push_to_hub(UpperCamelCase ) hf_model.push_to_hub(UpperCamelCase ) if __name__ == "__main__": _snake_case : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') _snake_case : str = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
# NOTE(review): results throughout are bound to the throwaway name `A` and several names referenced
# later (config, preprocessor, rename_keys vs. the list it builds, parser, args, CONFIG_MAP, ...) are
# therefore undefined or shadowed as written -- restore the upstream variable names before use.
292
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


# NOTE(review): `Optional` is used in the annotations below but never imported -- confirm upstream.
_snake_case : Optional[int] = logging.get_logger(__name__)

# NOTE(review): this rebinding discards the logger bound just above; both assignments target the
# same throwaway name `_snake_case` -- a renaming artifact of this file's obfuscation.
# Map of pretrained ViViT checkpoints to their hosted config files.
_snake_case : Optional[int] = {
    'google/vivit-b-16x2-kinetics400': (
        'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class _UpperCAmelCase ( lowercase_ ):
    """Configuration for a ViViT (video vision transformer) model.

    Holds the clip geometry (image_size, num_frames, tubelet_size,
    num_channels) and the transformer hyper-parameters (hidden size,
    layer/head counts, dropout rates, ...) used to instantiate the model.
    """

    # Model-type key consulted by the auto-model machinery.
    UpperCamelCase = '''vivit'''

    def __init__( self :Optional[Any] , __UpperCamelCase :Dict=2_24 , __UpperCamelCase :int=32 , __UpperCamelCase :Union[str, Any]=[2, 16, 16] , __UpperCamelCase :Optional[Any]=3 , __UpperCamelCase :Optional[Any]=7_68 , __UpperCamelCase :Any=12 , __UpperCamelCase :List[str]=12 , __UpperCamelCase :List[str]=30_72 , __UpperCamelCase :Any="gelu_fast" , __UpperCamelCase :List[Any]=0.0 , __UpperCamelCase :str=0.0 , __UpperCamelCase :Dict=0.02 , __UpperCamelCase :Optional[Any]=1e-06 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ):
        # NOTE(review): every statement below binds to the local throwaway name `A` instead of
        # setting attributes on `self` (e.g. `self.hidden_size = ...`) -- another renaming
        # artifact; as written this __init__ configures nothing. Preserved byte-for-byte.
        A = hidden_size
        A = num_hidden_layers
        A = num_attention_heads
        A = intermediate_size
        A = hidden_act
        A = hidden_dropout_prob
        A = attention_probs_dropout_prob
        A = initializer_range
        A = layer_norm_eps
        A = image_size
        A = num_frames
        A = tubelet_size
        A = num_channels
        A = qkv_bias
        # Forward any remaining keyword arguments to PretrainedConfig.
        super().__init__(**__UpperCamelCase )
292
1
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _UpperCAmelCase ( lowercase_ , unittest.TestCase ): UpperCamelCase = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Union[str, Any]=0 ): A = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(__UpperCamelCase ) ) A = np.random.RandomState(__UpperCamelCase ) A = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def lowerCamelCase ( self :Any ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Dict ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert 
image.shape == (1, 1_28, 1_28, 3) A = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Optional[Any] ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) # warmup pass to apply optimizations A = pipe(**self.get_dummy_inputs() ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Dict ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Optional[Any] ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - 
expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Union[str, Any] ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): @property def lowerCamelCase ( self :Optional[Any] ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowerCamelCase ( self :Optional[int] ): A = ort.SessionOptions() A = False return options def lowerCamelCase ( self :Dict ): A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) A = init_image.resize((7_68, 5_12) ) # using the PNDM scheduler by default A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = "A fantasy landscape, trending on artstation" A = np.random.RandomState(0 ) A = pipe( prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , ) A = output.images A = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) A = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] ) # 
TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def lowerCamelCase ( self :Any ): A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) A = init_image.resize((7_68, 5_12) ) A = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" ) A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = "A fantasy landscape, trending on artstation" A = np.random.RandomState(0 ) A = pipe( prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , ) A = output.images A = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) A = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
292
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _UpperCAmelCase ( lowercase_ , unittest.TestCase ): UpperCamelCase = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Union[str, Any]=0 ): A = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(__UpperCamelCase ) ) A = np.random.RandomState(__UpperCamelCase ) A = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def lowerCamelCase ( self :Any ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Dict ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert 
image.shape == (1, 1_28, 1_28, 3) A = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Optional[Any] ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) # warmup pass to apply optimizations A = pipe(**self.get_dummy_inputs() ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Dict ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Optional[Any] ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - 
expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Union[str, Any] ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): @property def lowerCamelCase ( self :Optional[Any] ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowerCamelCase ( self :Optional[int] ): A = ort.SessionOptions() A = False return options def lowerCamelCase ( self :Dict ): A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) A = init_image.resize((7_68, 5_12) ) # using the PNDM scheduler by default A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = "A fantasy landscape, trending on artstation" A = np.random.RandomState(0 ) A = pipe( prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , ) A = output.images A = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) A = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] ) # 
TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def lowerCamelCase ( self :Any ): A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) A = init_image.resize((7_68, 5_12) ) A = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" ) A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = "A fantasy landscape, trending on artstation" A = np.random.RandomState(0 ) A = pipe( prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , ) A = output.images A = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) A = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
292
1
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline _snake_case : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class _UpperCAmelCase ( lowercase_ ): def __init__( self :Any , __UpperCamelCase :Optional[int] , __UpperCamelCase :str ): super().__init__() self.register_modules(unet=__UpperCamelCase , scheduler=__UpperCamelCase ) @torch.no_grad() def __call__( self :Dict , __UpperCamelCase :int = 1 , __UpperCamelCase :int = 1_00 , __UpperCamelCase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCamelCase :Optional[float] = None , __UpperCamelCase :bool = True , ): if audio_length_in_s is None: A = self.unet.config.sample_size / self.unet.config.sample_rate A = audio_length_in_s * self.unet.config.sample_rate A = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( f"{audio_length_in_s} is too small. Make sure it's bigger or equal to" f" {3 * down_scale_factor / self.unet.config.sample_rate}." ) A = int(__UpperCamelCase ) if sample_size % down_scale_factor != 0: A = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled" f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising" " process." ) A = int(__UpperCamelCase ) A = next(iter(self.unet.parameters() ) ).dtype A = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(__UpperCamelCase , __UpperCamelCase ) and len(__UpperCamelCase ) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(__UpperCamelCase )}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) A = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase ) # set step values self.scheduler.set_timesteps(__UpperCamelCase , device=audio.device ) A = self.scheduler.timesteps.to(__UpperCamelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output A = self.unet(__UpperCamelCase , __UpperCamelCase ).sample # 2. compute previous image: x_t -> t_t-1 A = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample A = audio.clamp(-1 , 1 ).float().cpu().numpy() A = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=__UpperCamelCase )
292
"""simple docstring""" def A__ ( UpperCamelCase ): A = generate_pascal_triangle(UpperCamelCase ) for row_idx in range(UpperCamelCase ): # Print left spaces for _ in range(num_rows - row_idx - 1 ): print(end=" " ) # Print row values for col_idx in range(row_idx + 1 ): if col_idx != row_idx: print(triangle[row_idx][col_idx] , end=" " ) else: print(triangle[row_idx][col_idx] , end="" ) print() def A__ ( UpperCamelCase ): if not isinstance(UpperCamelCase , UpperCamelCase ): raise TypeError("The input value of 'num_rows' should be 'int'" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( "The input value of 'num_rows' should be greater than or equal to 0" ) A = [] for current_row_idx in range(UpperCamelCase ): A = populate_current_row(UpperCamelCase , UpperCamelCase ) triangle.append(UpperCamelCase ) return triangle def A__ ( UpperCamelCase , UpperCamelCase ): A = [-1] * (current_row_idx + 1) # first and last elements of current row are equal to 1 A, A = 1, 1 for current_col_idx in range(1 , UpperCamelCase ): calculate_current_element( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) return current_row def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ): A = triangle[current_row_idx - 1][current_col_idx - 1] A = triangle[current_row_idx - 1][current_col_idx] A = above_to_left_elt + above_to_right_elt def A__ ( UpperCamelCase ): if not isinstance(UpperCamelCase , UpperCamelCase ): raise TypeError("The input value of 'num_rows' should be 'int'" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( "The input value of 'num_rows' should be greater than or equal to 0" ) A = [[1]] for row_index in range(1 , UpperCamelCase ): A = [0] + result[-1] + [0] A = row_index + 1 # Calculate the number of distinct elements in a row A = sum(divmod(UpperCamelCase , 2 ) ) A = [ temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 ) ] A = row_first_half[: (row_index + 1) // 2] 
row_second_half.reverse() A = row_first_half + row_second_half result.append(UpperCamelCase ) return result def A__ ( ): from collections.abc import Callable from timeit import timeit def benchmark_a_function(UpperCamelCase , UpperCamelCase ) -> None: A = F"{func.__name__}({value})" A = timeit(F"__main__.{call}" , setup="import __main__" ) # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds") print(F"{call:38} -- {timing:.4f} seconds" ) for value in range(15 ): # (1, 7, 14): for func in (generate_pascal_triangle, generate_pascal_triangle_optimized): benchmark_a_function(UpperCamelCase , UpperCamelCase ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
292
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _snake_case : List[str] = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : List[str] = ['BartphoTokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys _snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
292
"""simple docstring""" import math import sys def A__ ( UpperCamelCase ): A = "" try: with open(UpperCamelCase , "rb" ) as binary_file: A = binary_file.read() for dat in data: A = F"{dat:08b}" result += curr_byte return result except OSError: print("File not accessible" ) sys.exit() def A__ ( UpperCamelCase ): A = {"0": "0", "1": "1"} A, A = "", "" A = len(UpperCamelCase ) for i in range(len(UpperCamelCase ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue A = lexicon[curr_string] result += last_match_id A = last_match_id + "0" if math.loga(UpperCamelCase ).is_integer(): A = {} for curr_key in list(UpperCamelCase ): A = lexicon.pop(UpperCamelCase ) A = new_lex A = last_match_id + "1" index += 1 A = "" return result def A__ ( UpperCamelCase , UpperCamelCase ): A = 8 try: with open(UpperCamelCase , "wb" ) as opened_file: A = [ to_write[i : i + byte_length] for i in range(0 , len(UpperCamelCase ) , UpperCamelCase ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append("10000000" ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(UpperCamelCase , 2 ).to_bytes(1 , byteorder="big" ) ) except OSError: print("File not accessible" ) sys.exit() def A__ ( UpperCamelCase ): A = 0 for letter in data_bits: if letter == "1": break counter += 1 A = data_bits[counter:] A = data_bits[counter + 1 :] return data_bits def A__ ( UpperCamelCase , UpperCamelCase ): A = read_file_binary(UpperCamelCase ) A = remove_prefix(UpperCamelCase ) A = decompress_data(UpperCamelCase ) write_file_binary(UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
292
1
"""simple docstring""" import os import zipfile import pytest from datasets.utils.extract import ( BzipaExtractor, Extractor, GzipExtractor, LzaExtractor, SevenZipExtractor, TarExtractor, XzExtractor, ZipExtractor, ZstdExtractor, ) from .utils import require_lza, require_pyazr, require_zstandard @pytest.mark.parametrize( "compression_format, is_archive" , [ ("7z", True), ("bz2", False), ("gzip", False), ("lz4", False), ("tar", True), ("xz", False), ("zip", True), ("zstd", False), ] , ) def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ): A = { "7z": (seven_zip_file, SevenZipExtractor), "bz2": (bza_file, BzipaExtractor), "gzip": (gz_file, GzipExtractor), "lz4": (lza_file, LzaExtractor), "tar": (tar_file, TarExtractor), "xz": (xz_file, XzExtractor), "zip": (zip_file, ZipExtractor), "zstd": (zstd_file, ZstdExtractor), } A, A = input_paths_and_base_extractors[compression_format] if input_path is None: A = F"for '{compression_format}' compression_format, " if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(UpperCamelCase ) assert base_extractor.is_extractable(UpperCamelCase ) A = tmp_path / ("extracted" if is_archive else "extracted.txt") base_extractor.extract(UpperCamelCase , UpperCamelCase ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name A = file_path.read_text(encoding="utf-8" ) else: A = output_path.read_text(encoding="utf-8" ) A = text_file.read_text(encoding="utf-8" ) assert extracted_file_content == expected_file_content @pytest.mark.parametrize( "compression_format, is_archive" , [ ("7z", True), ("bz2", False), ("gzip", False), ("lz4", False), 
("tar", True), ("xz", False), ("zip", True), ("zstd", False), ] , ) def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ): A = { "7z": seven_zip_file, "bz2": bza_file, "gzip": gz_file, "lz4": lza_file, "tar": tar_file, "xz": xz_file, "zip": zip_file, "zstd": zstd_file, } A = input_paths[compression_format] if input_path is None: A = F"for '{compression_format}' compression_format, " if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(UpperCamelCase ) A = Extractor.infer_extractor_format(UpperCamelCase ) assert extractor_format is not None A = tmp_path / ("extracted" if is_archive else "extracted.txt") Extractor.extract(UpperCamelCase , UpperCamelCase , UpperCamelCase ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name A = file_path.read_text(encoding="utf-8" ) else: A = output_path.read_text(encoding="utf-8" ) A = text_file.read_text(encoding="utf-8" ) assert extracted_file_content == expected_file_content @pytest.fixture def A__ ( UpperCamelCase , UpperCamelCase ): import tarfile A = tmp_path / "data_dot_dot" directory.mkdir() A = directory / "tar_file_with_dot_dot.tar" with tarfile.TarFile(UpperCamelCase , "w" ) as f: f.add(UpperCamelCase , arcname=os.path.join(".." , text_file.name ) ) return path @pytest.fixture def A__ ( UpperCamelCase ): import tarfile A = tmp_path / "data_sym_link" directory.mkdir() A = directory / "tar_file_with_sym_link.tar" os.symlink(".." 
, directory / "subdir" , target_is_directory=UpperCamelCase ) with tarfile.TarFile(UpperCamelCase , "w" ) as f: f.add(str(directory / "subdir" ) , arcname="subdir" ) # str required by os.readlink on Windows and Python < 3.8 return path @pytest.mark.parametrize( "insecure_tar_file, error_log" , [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")] , ) def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): A = { "tar_file_with_dot_dot": tar_file_with_dot_dot, "tar_file_with_sym_link": tar_file_with_sym_link, } A = insecure_tar_files[insecure_tar_file] A = tmp_path / "extracted" TarExtractor.extract(UpperCamelCase , UpperCamelCase ) assert caplog.text for record in caplog.records: assert record.levelname == "ERROR" assert error_log in record.msg def A__ ( UpperCamelCase ): # We should have less false positives than zipfile.is_zipfile # We do that by checking only the magic number A = tmpdir / "not_a_zip_file" # From: https://github.com/python/cpython/pull/5053 A = ( B"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00" B"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I" B"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07" B"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82" ) with not_a_zip_file.open("wb" ) as f: f.write(UpperCamelCase ) assert zipfile.is_zipfile(str(UpperCamelCase ) ) # is a false positive for `zipfile` assert not ZipExtractor.is_extractable(UpperCamelCase ) # but we're right
292
"""simple docstring""" class _UpperCAmelCase : def __init__( self :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Tuple ): A = name A = val def __str__( self :str ): return f"{self.__class__.__name__}({self.name}, {self.val})" def __lt__( self :List[Any] , __UpperCamelCase :Union[str, Any] ): return self.val < other.val class _UpperCAmelCase : def __init__( self :List[str] , __UpperCamelCase :Optional[Any] ): A = {} A = {} A = self.build_heap(__UpperCamelCase ) def __getitem__( self :int , __UpperCamelCase :Optional[int] ): return self.get_value(__UpperCamelCase ) def lowerCamelCase ( self :List[Any] , __UpperCamelCase :str ): return (idx - 1) // 2 def lowerCamelCase ( self :int , __UpperCamelCase :Optional[Any] ): return idx * 2 + 1 def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Optional[int] ): return idx * 2 + 2 def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :str ): return self.heap_dict[key] def lowerCamelCase ( self :int , __UpperCamelCase :Optional[Any] ): A = len(__UpperCamelCase ) - 1 A = self.get_parent_idx(__UpperCamelCase ) for idx, i in enumerate(__UpperCamelCase ): A = idx A = i.val for i in range(__UpperCamelCase , -1 , -1 ): self.sift_down(__UpperCamelCase , __UpperCamelCase ) return array def lowerCamelCase ( self :str , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Dict ): while True: A = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741 A = self.get_right_child_idx(__UpperCamelCase ) A = idx if l < len(__UpperCamelCase ) and array[l] < array[idx]: A = l if r < len(__UpperCamelCase ) and array[r] < array[smallest]: A = r if smallest != idx: A, A = array[smallest], array[idx] ( ( A ), ( A ), ) = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) A = smallest else: break def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Optional[int] ): A = self.get_parent_idx(__UpperCamelCase ) while p >= 0 and self.heap[p] > self.heap[idx]: A, A = self.heap[idx], 
self.heap[p] A, A = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) A = p A = self.get_parent_idx(__UpperCamelCase ) def lowerCamelCase ( self :Any ): return self.heap[0] def lowerCamelCase ( self :Tuple ): A, A = self.heap[-1], self.heap[0] A, A = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) A = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 , self.heap ) return x def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Optional[int] ): self.heap.append(__UpperCamelCase ) A = len(self.heap ) - 1 A = node.val self.sift_up(len(self.heap ) - 1 ) def lowerCamelCase ( self :Tuple ): return len(self.heap ) == 0 def lowerCamelCase ( self :Any , __UpperCamelCase :str , __UpperCamelCase :Dict ): assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" A = new_value A = new_value self.sift_up(self.idx_of_element[node] ) _snake_case : Optional[int] = Node('R', -1) _snake_case : Tuple = Node('B', 6) _snake_case : Tuple = Node('A', 3) _snake_case : Optional[int] = Node('X', 1) _snake_case : List[Any] = Node('E', 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array _snake_case : Tuple = MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print('Min Heap - before decrease key') for i in my_min_heap.heap: print(i) print('Min Heap - After decrease key of node [B -> -17]') my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
292
1
"""simple docstring""" from math import isqrt, loga def A__ ( UpperCamelCase ): A = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , UpperCamelCase , UpperCamelCase ): A = False return [i for i in range(2 , UpperCamelCase ) if is_prime[i]] def A__ ( UpperCamelCase = 800_800 , UpperCamelCase = 800_800 ): A = degree * loga(UpperCamelCase ) A = int(UpperCamelCase ) A = calculate_prime_numbers(UpperCamelCase ) A = 0 A = 0 A = len(UpperCamelCase ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(F"""{solution() = }""")
292
"""simple docstring""" from __future__ import annotations _snake_case : str = [] def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): for i in range(len(UpperCamelCase ) ): if board[row][i] == 1: return False for i in range(len(UpperCamelCase ) ): if board[i][column] == 1: return False for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , -1 , -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , len(UpperCamelCase ) ) ): if board[i][j] == 1: return False return True def A__ ( UpperCamelCase , UpperCamelCase ): if row >= len(UpperCamelCase ): solution.append(UpperCamelCase ) printboard(UpperCamelCase ) print() return True for i in range(len(UpperCamelCase ) ): if is_safe(UpperCamelCase , UpperCamelCase , UpperCamelCase ): A = 1 solve(UpperCamelCase , row + 1 ) A = 0 return False def A__ ( UpperCamelCase ): for i in range(len(UpperCamelCase ) ): for j in range(len(UpperCamelCase ) ): if board[i][j] == 1: print("Q" , end=" " ) else: print("." , end=" " ) print() # n=int(input("The no. of queens")) _snake_case : List[str] = 8 _snake_case : List[str] = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print('The total no. of solutions are :', len(solution))
292
1
"""simple docstring""" import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def A__ ( UpperCamelCase ): return (data["data"], data["target"]) def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): A = XGBRegressor(verbosity=0 , random_state=42 ) xgb.fit(UpperCamelCase , UpperCamelCase ) # Predict target for test data A = xgb.predict(UpperCamelCase ) A = predictions.reshape(len(UpperCamelCase ) , 1 ) return predictions def A__ ( ): A = fetch_california_housing() A, A = data_handling(UpperCamelCase ) A, A, A, A = train_test_split( UpperCamelCase , UpperCamelCase , test_size=0.25 , random_state=1 ) A = xgboost(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # Error printing print(F"Mean Absolute Error : {mean_absolute_error(UpperCamelCase , UpperCamelCase )}" ) print(F"Mean Square Error : {mean_squared_error(UpperCamelCase , UpperCamelCase )}" ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
292
"""simple docstring""" import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class _UpperCAmelCase : @staticmethod def lowerCamelCase ( *__UpperCamelCase :List[Any] , **__UpperCamelCase :List[Any] ): pass def A__ ( UpperCamelCase ): A = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class _UpperCAmelCase ( unittest.TestCase ): UpperCamelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] ): A = DepthEstimationPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase ( self :Dict , __UpperCamelCase :Optional[int] , __UpperCamelCase :Optional[Any] ): A = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" ) self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , __UpperCamelCase ) import datasets A = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" ) A = depth_estimator( [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] ) self.assertEqual( [ {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), 
"depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, ] , __UpperCamelCase , ) @require_tf @unittest.skip("Depth estimation is not implemented in TF" ) def lowerCamelCase ( self :Optional[Any] ): pass @slow @require_torch def lowerCamelCase ( self :Optional[Any] ): A = "Intel/dpt-large" A = pipeline("depth-estimation" , model=__UpperCamelCase ) A = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" ) A = hashimage(outputs["depth"] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 ) @require_torch def lowerCamelCase ( self :Optional[Any] ): # This is highly irregular to have no small tests. self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
292
1
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class _UpperCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self :Optional[Any] ): A = tempfile.mkdtemp() A = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) A = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073], "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711], } A = os.path.join(self.tmpdirname , __UpperCamelCase ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(__UpperCamelCase , __UpperCamelCase ) def lowerCamelCase ( self :str , **__UpperCamelCase :Dict ): return BertTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def lowerCamelCase ( self :Optional[Any] , **__UpperCamelCase :Union[str, Any] ): return BertTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def lowerCamelCase ( self :int , **__UpperCamelCase :List[Any] ): return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def lowerCamelCase ( self :Optional[int] ): shutil.rmtree(self.tmpdirname ) def lowerCamelCase ( self :Tuple ): A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] A = 
[Image.fromarray(np.moveaxis(__UpperCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCamelCase ( self :Tuple ): A = self.get_tokenizer() A = self.get_rust_tokenizer() A = self.get_image_processor() A = AlignProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) A = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCamelCase ) A = AlignProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) A = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __UpperCamelCase ) self.assertIsInstance(processor_fast.tokenizer , __UpperCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __UpperCamelCase ) self.assertIsInstance(processor_fast.image_processor , __UpperCamelCase ) def lowerCamelCase ( self :Union[str, Any] ): A = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) A = self.get_image_processor(do_normalize=__UpperCamelCase , padding_value=1.0 ) A = AlignProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__UpperCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __UpperCamelCase ) 
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __UpperCamelCase ) def lowerCamelCase ( self :List[Any] ): A = self.get_image_processor() A = self.get_tokenizer() A = AlignProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase ) A = self.prepare_image_inputs() A = image_processor(__UpperCamelCase , return_tensors="np" ) A = processor(images=__UpperCamelCase , return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowerCamelCase ( self :Union[str, Any] ): A = self.get_image_processor() A = self.get_tokenizer() A = AlignProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase ) A = "lower newer" A = processor(text=__UpperCamelCase ) A = tokenizer(__UpperCamelCase , padding="max_length" , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCamelCase ( self :Union[str, Any] ): A = self.get_image_processor() A = self.get_tokenizer() A = AlignProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase ) A = "lower newer" A = self.prepare_image_inputs() A = processor(text=__UpperCamelCase , images=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def lowerCamelCase ( self :Dict ): A = self.get_image_processor() A = self.get_tokenizer() A = AlignProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase ) A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A = processor.batch_decode(__UpperCamelCase ) A = tokenizer.batch_decode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) def lowerCamelCase ( self :str ): A = self.get_image_processor() A 
= self.get_tokenizer() A = AlignProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase ) A = "lower newer" A = self.prepare_image_inputs() A = processor(text=__UpperCamelCase , images=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
292
"""simple docstring""" from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _UpperCAmelCase : UpperCamelCase = PegasusConfig UpperCamelCase = {} UpperCamelCase = '''gelu''' def __init__( self :Union[str, Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :str=13 , __UpperCamelCase :List[Any]=7 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :List[Any]=False , __UpperCamelCase :Any=99 , __UpperCamelCase :Tuple=32 , __UpperCamelCase :Optional[int]=2 , __UpperCamelCase :Optional[Any]=4 , __UpperCamelCase :Tuple=37 , __UpperCamelCase :Optional[Any]=0.1 , __UpperCamelCase :Tuple=0.1 , __UpperCamelCase :Optional[int]=40 , __UpperCamelCase :Tuple=2 , __UpperCamelCase :Dict=1 , __UpperCamelCase :Any=0 , ): A = parent A = batch_size A = seq_length A = is_training A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = eos_token_id A = pad_token_id A = bos_token_id def lowerCamelCase ( self :Tuple ): A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A = tf.concat([input_ids, eos_tensor] , axis=1 ) A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , 
encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) A = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) return config, inputs_dict def lowerCamelCase ( self :str , __UpperCamelCase :str , __UpperCamelCase :Union[str, Any] ): A = TFPegasusModel(config=__UpperCamelCase ).get_decoder() A = inputs_dict["input_ids"] A = input_ids[:1, :] A = inputs_dict["attention_mask"][:1, :] A = inputs_dict["head_mask"] A = 1 # first forward pass A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , head_mask=__UpperCamelCase , use_cache=__UpperCamelCase ) A, A = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A = ids_tensor((self.batch_size, 3) , config.vocab_size ) A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A = tf.concat([input_ids, next_tokens] , axis=-1 ) A = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0] A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A = output_from_no_past[:, -3:, random_slice_idx] A = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__UpperCamelCase , __UpperCamelCase , rtol=1e-3 ) def A__ ( 
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , ): if attention_mask is None: A = tf.cast(tf.math.not_equal(UpperCamelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: A = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: A = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): UpperCamelCase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () UpperCamelCase = (TFPegasusForConditionalGeneration,) if is_tf_available() else () UpperCamelCase = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) UpperCamelCase = True UpperCamelCase = False UpperCamelCase = False def lowerCamelCase ( self :int ): A = TFPegasusModelTester(self ) A = ConfigTester(self , config_class=__UpperCamelCase ) def lowerCamelCase ( self :Dict ): self.config_tester.run_common_tests() def lowerCamelCase ( self :Any ): A = self.model_tester.prepare_config_and_inputs_for_common() 
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCamelCase ) @require_sentencepiece @require_tokenizers @require_tf class _UpperCAmelCase ( unittest.TestCase ): UpperCamelCase = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for 
their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] UpperCamelCase = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers UpperCamelCase = '''google/pegasus-xsum''' @cached_property def lowerCamelCase ( self :Any ): return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCamelCase ( self :Dict ): A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCamelCase ( self :str , **__UpperCamelCase :str ): A = self.translate_src_text(**__UpperCamelCase ) assert self.expected_text == generated_words def lowerCamelCase ( self :Any , **__UpperCamelCase :List[str] ): A = self.tokenizer(self.src_text , **__UpperCamelCase , padding=__UpperCamelCase , return_tensors="tf" ) A = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCamelCase , ) A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCamelCase ) return generated_words @slow def lowerCamelCase ( self :Union[str, Any] ): self._assert_generated_batch_equal_expected()
292
1
"""simple docstring""" # XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path _snake_case : Any = Path(__file__).resolve().parents[3] / 'src' sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) _snake_case : List[str] = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'} _snake_case : Dict = 'zero2' _snake_case : str = 'zero3' _snake_case : Dict = [ZEROa, ZEROa] def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param A = parameterized.to_safe_name("_".join(str(UpperCamelCase ) for x in param.args ) ) return F"{func.__name__}_{param_based_name}" # Cartesian-product of zero stages with models to test _snake_case : List[str] = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class _UpperCAmelCase ( lowercase_ ): @parameterized.expand(__UpperCamelCase , name_func=__UpperCamelCase ) def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Dict , __UpperCamelCase :Optional[Any] ): self.run_and_check( stage=__UpperCamelCase , model=__UpperCamelCase , 
distributed=__UpperCamelCase , fpaa=__UpperCamelCase , ) @require_torch_multi_gpu @parameterized.expand(__UpperCamelCase , name_func=__UpperCamelCase ) def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :List[Any] , __UpperCamelCase :List[Any] ): self.run_and_check( stage=__UpperCamelCase , model=__UpperCamelCase , distributed=__UpperCamelCase , fpaa=__UpperCamelCase , ) @parameterized.expand(__UpperCamelCase , name_func=__UpperCamelCase ) def lowerCamelCase ( self :List[str] , __UpperCamelCase :str , __UpperCamelCase :Dict ): self.run_and_check( stage=__UpperCamelCase , model=__UpperCamelCase , distributed=__UpperCamelCase , fpaa=__UpperCamelCase , ) @require_torch_multi_gpu @parameterized.expand(__UpperCamelCase , name_func=__UpperCamelCase ) def lowerCamelCase ( self :str , __UpperCamelCase :Optional[Any] , __UpperCamelCase :int ): self.run_and_check( stage=__UpperCamelCase , model=__UpperCamelCase , distributed=__UpperCamelCase , fpaa=__UpperCamelCase , ) def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Optional[int] ): # XXX: run_asr is premature and doesn't save any results # so all we check for now is that the process didn't fail pass def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :str , __UpperCamelCase :str , __UpperCamelCase :int = 10 , __UpperCamelCase :bool = True , __UpperCamelCase :bool = True , __UpperCamelCase :bool = True , ): A = models[model] A = self.run_trainer( stage=__UpperCamelCase , model_name=__UpperCamelCase , eval_steps=__UpperCamelCase , num_train_epochs=1 , distributed=__UpperCamelCase , fpaa=__UpperCamelCase , ) self.do_checks(__UpperCamelCase ) return output_dir def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :str , __UpperCamelCase :str , __UpperCamelCase :int = 10 , __UpperCamelCase :int = 1 , __UpperCamelCase :bool = True , __UpperCamelCase :bool = True , ): A = self.get_auto_remove_tmp_dir("./xxx" , after=__UpperCamelCase ) A = f"\n --model_name_or_path {model_name}\n 
--dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(__UpperCamelCase )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n ".split() if fpaa: args.extend(["--fp16"] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files A = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split() A = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"] A = self.get_launcher(__UpperCamelCase ) A = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__UpperCamelCase , env=self.get_env() ) return output_dir def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Optional[Any]=False ): # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) A = min(2 , get_gpu_count() ) if distributed else 1 return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
292
"""simple docstring""" from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def A__ ( UpperCamelCase = "laptop" ): A = F"https://www.amazon.in/laptop/s?k={product}" A = { "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36", "Accept-Language": "en-US, en;q=0.5", } A = BeautifulSoup(requests.get(UpperCamelCase , headers=UpperCamelCase ).text ) # Initialize a Pandas dataframe with the column titles A = DataFrame( columns=[ "Product Title", "Product Link", "Current Price of the product", "Product Rating", "MRP of the product", "Discount", ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( "div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ): try: A = item.ha.text A = "https://www.amazon.in/" + item.ha.a["href"] A = item.find("span" , attrs={"class": "a-offscreen"} ).text try: A = item.find("span" , attrs={"class": "a-icon-alt"} ).text except AttributeError: A = "Not available" try: A = ( "₹" + item.find( "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1] ) except AttributeError: A = "" try: A = float( ( ( float(product_mrp.strip("₹" ).replace("," , "" ) ) - float(product_price.strip("₹" ).replace("," , "" ) ) ) / float(product_mrp.strip("₹" ).replace("," , "" ) ) ) * 100 ) except ValueError: A = float("nan" ) except AttributeError: pass A = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] A = " " A = " " data_frame.index += 1 return data_frame if __name__ == "__main__": _snake_case : Optional[int] = 'headphones' get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
292
1
"""simple docstring""" def A__ ( UpperCamelCase ): return " ".join( "".join(word[::-1] ) if len(UpperCamelCase ) > 4 else word for word in sentence.split() ) if __name__ == "__main__": import doctest doctest.testmod() print(reverse_long_words('Hey wollef sroirraw'))
292
"""simple docstring""" import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging _snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name class _UpperCAmelCase ( lowercase_ ): def __init__( self :Dict , __UpperCamelCase :WhisperForConditionalGeneration , __UpperCamelCase :WhisperProcessor , __UpperCamelCase :AutoencoderKL , __UpperCamelCase :CLIPTextModel , __UpperCamelCase :CLIPTokenizer , __UpperCamelCase :UNetaDConditionModel , __UpperCamelCase :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __UpperCamelCase :StableDiffusionSafetyChecker , __UpperCamelCase :CLIPImageProcessor , ): super().__init__() if safety_checker is None: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
) self.register_modules( speech_model=__UpperCamelCase , speech_processor=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , feature_extractor=__UpperCamelCase , ) def lowerCamelCase ( self :Any , __UpperCamelCase :Optional[Union[str, int]] = "auto" ): if slice_size == "auto": A = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__UpperCamelCase ) def lowerCamelCase ( self :Tuple ): self.enable_attention_slicing(__UpperCamelCase ) @torch.no_grad() def __call__( self :Optional[Any] , __UpperCamelCase :Any , __UpperCamelCase :Dict=1_60_00 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 50 , __UpperCamelCase :float = 7.5 , __UpperCamelCase :Optional[Union[str, List[str]]] = None , __UpperCamelCase :Optional[int] = 1 , __UpperCamelCase :float = 0.0 , __UpperCamelCase :Optional[torch.Generator] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , __UpperCamelCase :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase :int = 1 , **__UpperCamelCase :Dict , ): A = self.speech_processor.feature_extractor( __UpperCamelCase , return_tensors="pt" , sampling_rate=__UpperCamelCase ).input_features.to(self.device ) A = self.speech_model.generate(__UpperCamelCase , max_length=48_00_00 ) A = self.speech_processor.tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase , normalize=__UpperCamelCase )[ 0 ] if isinstance(__UpperCamelCase , __UpperCamelCase ): A = 1 elif isinstance(__UpperCamelCase , __UpperCamelCase ): A = len(__UpperCamelCase ) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." 
) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(__UpperCamelCase )}." ) # get prompt text embeddings A = self.tokenizer( __UpperCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , ) A = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) A = text_input_ids[:, : self.tokenizer.model_max_length] A = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method A, A, A = text_embeddings.shape A = text_embeddings.repeat(1 , __UpperCamelCase , 1 ) A = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. A = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: A = 42 if negative_prompt is None: A = [""] * batch_size elif type(__UpperCamelCase ) is not type(__UpperCamelCase ): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !=" f" {type(__UpperCamelCase )}." 
) elif isinstance(__UpperCamelCase , __UpperCamelCase ): A = [negative_prompt] elif batch_size != len(__UpperCamelCase ): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: A = negative_prompt A = text_input_ids.shape[-1] A = self.tokenizer( __UpperCamelCase , padding="max_length" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="pt" , ) A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method A = uncond_embeddings.shape[1] A = uncond_embeddings.repeat(1 , __UpperCamelCase , 1 ) A = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes A = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
A = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) A = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="cpu" , dtype=__UpperCamelCase ).to( self.device ) else: A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase ) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" ) A = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(__UpperCamelCase ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand A = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler A = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] A = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) A = {} if accepts_eta: A = eta for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents A = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase ) # predict the noise residual A = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample # perform guidance if do_classifier_free_guidance: A, A = noise_pred.chunk(2 ) A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 A = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) A = 1 / 0.18_215 * latents A = self.vae.decode(__UpperCamelCase ).sample A = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": A = self.numpy_to_pil(__UpperCamelCase ) if not return_dict: return image return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
292
1
"""simple docstring""" import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def A__ ( UpperCamelCase , UpperCamelCase ): A = F"{sampling_rate}" A = "1" A = "f32le" A = [ "ffmpeg", "-i", "pipe:0", "-ac", ac, "-ar", ar, "-f", format_for_conversion, "-hide_banner", "-loglevel", "quiet", "pipe:1", ] try: with subprocess.Popen(UpperCamelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: A = ffmpeg_process.communicate(UpperCamelCase ) except FileNotFoundError as error: raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error A = output_stream[0] A = np.frombuffer(UpperCamelCase , np.floataa ) if audio.shape[0] == 0: raise ValueError("Malformed soundfile" ) return audio def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase = "f32le" , ): A = F"{sampling_rate}" A = "1" if format_for_conversion == "s16le": A = 2 elif format_for_conversion == "f32le": A = 4 else: raise ValueError(F"Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`" ) A = platform.system() if system == "Linux": A = "alsa" A = "default" elif system == "Darwin": A = "avfoundation" A = ":0" elif system == "Windows": A = "dshow" A = "default" A = [ "ffmpeg", "-f", format_, "-i", input_, "-ac", ac, "-ar", ar, "-f", format_for_conversion, "-fflags", "nobuffer", "-hide_banner", "-loglevel", "quiet", "pipe:1", ] A = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample A = _ffmpeg_stream(UpperCamelCase , UpperCamelCase ) for item in iterator: yield item def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = "f32le" , ): if stream_chunk_s is not None: A = stream_chunk_s else: A = chunk_length_s A = ffmpeg_microphone(UpperCamelCase , UpperCamelCase , format_for_conversion=UpperCamelCase ) if format_for_conversion == "s16le": A = np.intaa A = 2 elif format_for_conversion == "f32le": A = np.floataa A = 4 else: raise ValueError(F"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" ) if stride_length_s is None: A = chunk_length_s / 6 A = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(UpperCamelCase , (int, float) ): A = [stride_length_s, stride_length_s] A = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample A = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample A = datetime.datetime.now() A = datetime.timedelta(seconds=UpperCamelCase ) for item in chunk_bytes_iter(UpperCamelCase , UpperCamelCase , stride=(stride_left, stride_right) , stream=UpperCamelCase ): # Put everything back in numpy scale A = np.frombuffer(item["raw"] , dtype=UpperCamelCase ) A = ( item["stride"][0] // size_of_sample, item["stride"][1] // size_of_sample, ) A = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! 
SKIP continue yield item def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = False ): A = B"" A, A = stride if stride_left + stride_right >= chunk_len: raise ValueError( F"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}" ) A = 0 for raw in iterator: acc += raw if stream and len(UpperCamelCase ) < chunk_len: A = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(UpperCamelCase ) >= chunk_len: # We are flushing the accumulator A = (_stride_left, stride_right) A = {"raw": acc[:chunk_len], "stride": stride} if stream: A = False yield item A = stride_left A = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(UpperCamelCase ) > stride_left: A = {"raw": acc, "stride": (_stride_left, 0)} if stream: A = False yield item def A__ ( UpperCamelCase , UpperCamelCase ): A = 2**24 # 16Mo try: with subprocess.Popen(UpperCamelCase , stdout=subprocess.PIPE , bufsize=UpperCamelCase ) as ffmpeg_process: while True: A = ffmpeg_process.stdout.read(UpperCamelCase ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
292
"""simple docstring""" _snake_case : Optional[int] = [ 'DownloadConfig', 'DownloadManager', 'DownloadMode', 'StreamingDownloadManager', ] from .download_config import DownloadConfig from .download_manager import DownloadManager, DownloadMode from .streaming_download_manager import StreamingDownloadManager
292
1
"""simple docstring""" import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def A__ ( UpperCamelCase ): if isinstance(UpperCamelCase , collections.abc.Iterable ): return x return (x, x) @require_flax class _UpperCAmelCase : def lowerCamelCase ( self :str , __UpperCamelCase :Tuple , __UpperCamelCase :Optional[Any] ): pass def lowerCamelCase ( self :List[str] ): pass def lowerCamelCase ( self :Optional[Any] ): pass def lowerCamelCase ( self :Dict , __UpperCamelCase :np.ndarray , __UpperCamelCase :np.ndarray , __UpperCamelCase :float ): A = np.abs((a - b) ).max() self.assertLessEqual(__UpperCamelCase , __UpperCamelCase , f"Difference between torch and flax is {diff} (>= {tol})." 
) def lowerCamelCase ( self :Tuple , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Dict , __UpperCamelCase :List[Any]=None , **__UpperCamelCase :Any ): A = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCamelCase , __UpperCamelCase ) A = FlaxVisionTextDualEncoderModel(__UpperCamelCase ) A = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) ) def lowerCamelCase ( self :Dict , __UpperCamelCase :List[str] , __UpperCamelCase :Dict , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :Optional[Any]=None , **__UpperCamelCase :Union[str, Any] ): A, A = self.get_vision_text_model(__UpperCamelCase , __UpperCamelCase ) A = {"vision_model": vision_model, "text_model": text_model} A = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCamelCase ) A = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) ) def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Any , __UpperCamelCase :int , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :int , __UpperCamelCase :Union[str, Any]=None , **__UpperCamelCase :str ): A, A = self.get_vision_text_model(__UpperCamelCase , __UpperCamelCase ) A = {"vision_model": vision_model, "text_model": text_model} A = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCamelCase ) A = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase ) A = output[0] with 
tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__UpperCamelCase ) A = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCamelCase ) A = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase ) A = after_output[0] A = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__UpperCamelCase , 1e-3 ) def lowerCamelCase ( self :Tuple , __UpperCamelCase :Optional[int] , __UpperCamelCase :Dict , __UpperCamelCase :Dict , __UpperCamelCase :Dict , __UpperCamelCase :str=None , **__UpperCamelCase :List[str] ): A, A = self.get_vision_text_model(__UpperCamelCase , __UpperCamelCase ) A = {"vision_model": vision_model, "text_model": text_model} A = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCamelCase ) A = model( input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , output_attentions=__UpperCamelCase ) A = output.vision_model_output.attentions self.assertEqual(len(__UpperCamelCase ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) A = to_atuple(vision_model.config.image_size ) A = to_atuple(vision_model.config.patch_size ) A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) A = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) A = output.text_model_output.attentions self.assertEqual(len(__UpperCamelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def lowerCamelCase ( self :Tuple , __UpperCamelCase :List[Any] , __UpperCamelCase :Dict , __UpperCamelCase :Tuple ): pt_model.to(__UpperCamelCase ) pt_model.eval() # prepare inputs A = inputs_dict A = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): A = pt_model(**__UpperCamelCase 
).to_tuple() A = fx_model(**__UpperCamelCase ).to_tuple() self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) , "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(__UpperCamelCase , pt_output.numpy() , 4e-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(__UpperCamelCase ) A = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCamelCase , from_pt=__UpperCamelCase ) A = fx_model_loaded(**__UpperCamelCase ).to_tuple() self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) , "Output lengths differ between Flax and PyTorch" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(__UpperCamelCase , pt_output.numpy() , 4e-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(__UpperCamelCase ) A = VisionTextDualEncoderModel.from_pretrained(__UpperCamelCase , from_flax=__UpperCamelCase ) pt_model_loaded.to(__UpperCamelCase ) pt_model_loaded.eval() with torch.no_grad(): A = pt_model_loaded(**__UpperCamelCase ).to_tuple() self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) , "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(__UpperCamelCase , pt_output_loaded.numpy() , 4e-2 ) def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Optional[int] , __UpperCamelCase :List[Any] , __UpperCamelCase :Optional[int] ): A = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCamelCase , __UpperCamelCase ) A = VisionTextDualEncoderModel(__UpperCamelCase ) A = FlaxVisionTextDualEncoderModel(__UpperCamelCase ) A = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __UpperCamelCase ) A = fx_state self.check_pt_flax_equivalence(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def lowerCamelCase 
( self :Tuple , __UpperCamelCase :Any , __UpperCamelCase :List[Any] , __UpperCamelCase :Union[str, Any] ): A = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCamelCase , __UpperCamelCase ) A = VisionTextDualEncoderModel(__UpperCamelCase ) A = FlaxVisionTextDualEncoderModel(__UpperCamelCase ) A = load_flax_weights_in_pytorch_model(__UpperCamelCase , fx_model.params ) self.check_pt_flax_equivalence(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def lowerCamelCase ( self :Any ): A = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**__UpperCamelCase ) def lowerCamelCase ( self :Dict ): A = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**__UpperCamelCase ) def lowerCamelCase ( self :Dict ): A = self.prepare_config_and_inputs() self.check_save_load(**__UpperCamelCase ) def lowerCamelCase ( self :Dict ): A = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**__UpperCamelCase ) @is_pt_flax_cross_test def lowerCamelCase ( self :List[Any] ): A = self.prepare_config_and_inputs() A = config_inputs_dict.pop("vision_config" ) A = config_inputs_dict.pop("text_config" ) A = config_inputs_dict self.check_equivalence_pt_to_flax(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) self.check_equivalence_flax_to_pt(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) @slow def lowerCamelCase ( self :Dict ): A, A = self.get_pretrained_model_and_inputs() A = model_a(**__UpperCamelCase ) A = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(__UpperCamelCase ) A = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCamelCase ) A = model_a(**__UpperCamelCase ) A = after_outputs[0] A = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__UpperCamelCase , 1e-5 ) @require_flax class _UpperCAmelCase ( lowercase_ , unittest.TestCase ): def lowerCamelCase ( self :Optional[int] ): A = 
FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=__UpperCamelCase , text_from_pt=__UpperCamelCase , ) A = 13 A = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) A = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) A = random_attention_mask([batch_size, 4] ) A = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def lowerCamelCase ( self :Any , __UpperCamelCase :Dict , __UpperCamelCase :List[Any] ): A = FlaxViTModel(__UpperCamelCase ) A = FlaxBertModel(__UpperCamelCase ) return vision_model, text_model def lowerCamelCase ( self :Optional[Any] ): A = FlaxViTModelTester(self ) A = FlaxBertModelTester(self ) A = vit_model_tester.prepare_config_and_inputs() A = bert_model_tester.prepare_config_and_inputs() A, A = vision_config_and_inputs A, A, A, A = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class _UpperCAmelCase ( lowercase_ , unittest.TestCase ): def lowerCamelCase ( self :List[Any] ): A = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=__UpperCamelCase , text_from_pt=__UpperCamelCase , ) A = 13 A = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) A = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) A = random_attention_mask([batch_size, 4] ) A = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} 
return model, inputs def lowerCamelCase ( self :Tuple , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Optional[Any] ): A = FlaxCLIPVisionModel(__UpperCamelCase ) A = FlaxBertModel(__UpperCamelCase ) return vision_model, text_model def lowerCamelCase ( self :Optional[int] ): A = FlaxCLIPVisionModelTester(self ) A = FlaxBertModelTester(self ) A = clip_model_tester.prepare_config_and_inputs() A = bert_model_tester.prepare_config_and_inputs() A, A = vision_config_and_inputs A, A, A, A = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class _UpperCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase ( self :Any ): A = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0 ) A = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" ) A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) A = processor( text=["una foto di un gatto", "una foto di un cane"] , images=__UpperCamelCase , padding=__UpperCamelCase , return_tensors="np" ) A = model(**__UpperCamelCase ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) A = np.array([[1.2_284_727, 0.3_104_122]] ) self.assertTrue(np.allclose(outputs.logits_per_image , __UpperCamelCase , atol=1e-3 ) )
292
"""simple docstring""" import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def A__ ( UpperCamelCase ): A = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(UpperCamelCase , UpperCamelCase ) def A__ ( UpperCamelCase ): A = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: A = s_dict.pop(UpperCamelCase ) elif "subsample" in key: A = s_dict.pop(UpperCamelCase ) def A__ ( UpperCamelCase ): A, A = emb.weight.shape A = nn.Linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase ) A = emb.weight.data return lin_layer def A__ ( UpperCamelCase , UpperCamelCase ): A = torch.load(UpperCamelCase , map_location="cpu" ) A = mam_aaa["args"] A = mam_aaa["model"] A = state_dict["decoder.output_projection.weight"] remove_ignore_keys_(UpperCamelCase ) rename_keys(UpperCamelCase ) A = state_dict["decoder.embed_tokens.weight"].shape[0] A = args.share_decoder_input_output_embed A = [int(UpperCamelCase ) for i in args.conv_kernel_sizes.split("," )] A = SpeechaTextConfig( vocab_size=UpperCamelCase , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(UpperCamelCase ) , conv_channels=args.conv_channels , conv_kernel_sizes=UpperCamelCase , 
input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=UpperCamelCase , num_beams=5 , max_length=200 , use_cache=UpperCamelCase , decoder_start_token_id=2 , early_stopping=UpperCamelCase , ) A = SpeechaTextForConditionalGeneration(UpperCamelCase ) A, A = model.model.load_state_dict(UpperCamelCase , strict=UpperCamelCase ) if len(UpperCamelCase ) > 0 and not set(UpperCamelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing," F" but all the following weights are missing {missing}" ) if tie_embeds: A = make_linear_from_emb(model.model.decoder.embed_tokens ) else: A = lm_head_weights model.save_pretrained(UpperCamelCase ) if __name__ == "__main__": _snake_case : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') _snake_case : str = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
292
1
"""simple docstring""" _snake_case : List[str] = { 'a': 'AAAAA', 'b': 'AAAAB', 'c': 'AAABA', 'd': 'AAABB', 'e': 'AABAA', 'f': 'AABAB', 'g': 'AABBA', 'h': 'AABBB', 'i': 'ABAAA', 'j': 'BBBAA', 'k': 'ABAAB', 'l': 'ABABA', 'm': 'ABABB', 'n': 'ABBAA', 'o': 'ABBAB', 'p': 'ABBBA', 'q': 'ABBBB', 'r': 'BAAAA', 's': 'BAAAB', 't': 'BAABA', 'u': 'BAABB', 'v': 'BBBAB', 'w': 'BABAA', 'x': 'BABAB', 'y': 'BABBA', 'z': 'BABBB', ' ': ' ', } _snake_case : List[Any] = {value: key for key, value in encode_dict.items()} def A__ ( UpperCamelCase ): A = "" for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception("encode() accepts only letters of the alphabet and spaces" ) return encoded def A__ ( UpperCamelCase ): if set(UpperCamelCase ) - {"A", "B", " "} != set(): raise Exception("decode() accepts only 'A', 'B' and spaces" ) A = "" for word in coded.split(): while len(UpperCamelCase ) != 0: decoded += decode_dict[word[:5]] A = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
292
"""simple docstring""" from math import isqrt, loga def A__ ( UpperCamelCase ): A = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , UpperCamelCase , UpperCamelCase ): A = False return [i for i in range(2 , UpperCamelCase ) if is_prime[i]] def A__ ( UpperCamelCase = 800_800 , UpperCamelCase = 800_800 ): A = degree * loga(UpperCamelCase ) A = int(UpperCamelCase ) A = calculate_prime_numbers(UpperCamelCase ) A = 0 A = 0 A = len(UpperCamelCase ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(F"""{solution() = }""")
292
1
"""simple docstring""" import math import random def A__ ( UpperCamelCase , UpperCamelCase = False ): if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value )) # Initial Value _snake_case : Optional[int] = 0.0_2 def A__ ( UpperCamelCase , UpperCamelCase ): A = float(2 * (random.randint(1 , 100 )) - 1 ) for _ in range(UpperCamelCase ): # Forward propagation A = sigmoid_function(INITIAL_VALUE * weight ) # How much did we miss? A = (expected / 100) - layer_a # Error delta A = layer_1_error * sigmoid_function(UpperCamelCase , UpperCamelCase ) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_a * 100 if __name__ == "__main__": import doctest doctest.testmod() _snake_case : Any = int(input('Expected value: ')) _snake_case : List[Any] = int(input('Number of propagations: ')) print(forward_propagation(expected, number_propagations))
292
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _snake_case : Union[str, Any] = { 'configuration_encodec': [ 'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EncodecConfig', ], 'feature_extraction_encodec': ['EncodecFeatureExtractor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : int = [ 'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST', 'EncodecModel', 'EncodecPreTrainedModel', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys _snake_case : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
292
1
"""simple docstring""" def A__ ( UpperCamelCase , UpperCamelCase ): A = len(UpperCamelCase ) print("The following activities are selected:" ) # The first activity is always selected A = 0 print(UpperCamelCase , end="," ) # Consider rest of the activities for j in range(UpperCamelCase ): # If this activity has start time greater than # or equal to the finish time of previously # selected activity, then select it if start[j] >= finish[i]: print(UpperCamelCase , end="," ) A = j if __name__ == "__main__": import doctest doctest.testmod() _snake_case : Any = [1, 3, 0, 5, 8, 5] _snake_case : Optional[int] = [2, 4, 6, 7, 9, 9] print_max_activities(start, finish)
292
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging _snake_case : List[Any] = logging.get_logger(__name__) _snake_case : int = { 'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json', # See all Marian models at https://huggingface.co/models?filter=marian } class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = '''marian''' UpperCamelCase = ['''past_key_values'''] UpperCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self :int , __UpperCamelCase :Any=5_81_01 , __UpperCamelCase :int=None , __UpperCamelCase :Union[str, Any]=10_24 , __UpperCamelCase :Union[str, Any]=12 , __UpperCamelCase :str=40_96 , __UpperCamelCase :int=16 , __UpperCamelCase :int=12 , __UpperCamelCase :Optional[Any]=40_96 , __UpperCamelCase :Optional[Any]=16 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :str=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :Any="gelu" , __UpperCamelCase :Any=10_24 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Optional[Any]=0.0 , __UpperCamelCase :Union[str, Any]=0.0 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :List[str]=5_81_00 , __UpperCamelCase :str=False , __UpperCamelCase :Optional[int]=5_81_00 , __UpperCamelCase :List[Any]=0 , __UpperCamelCase :List[str]=0 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ): A = vocab_size A = decoder_vocab_size or vocab_size A = max_position_embeddings A = d_model A = encoder_ffn_dim A = encoder_layers A = encoder_attention_heads A = decoder_ffn_dim A = decoder_layers A = decoder_attention_heads A = dropout A = 
attention_dropout A = activation_dropout A = activation_function A = init_std A = encoder_layerdrop A = decoder_layerdrop A = use_cache A = encoder_layers A = scale_embedding # scale factor will be sqrt(d_model) if True A = share_encoder_decoder_embeddings super().__init__( pad_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , forced_eos_token_id=__UpperCamelCase , **__UpperCamelCase , ) class _UpperCAmelCase ( lowercase_ ): @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def lowerCamelCase ( self :List[str] ): if self.task in ["default", "seq2seq-lm"]: A = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: A = {0: "batch"} A = {0: "batch", 1: "past_decoder_sequence + sequence"} else: A = {0: "batch", 1: "decoder_sequence"} A = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(__UpperCamelCase , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
A = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: A, A = self.num_layers for i in range(__UpperCamelCase ): A = {0: "batch", 2: "past_sequence + sequence"} A = {0: "batch", 2: "past_sequence + sequence"} else: A = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def lowerCamelCase ( self :List[str] ): if self.task in ["default", "seq2seq-lm"]: A = super().outputs else: A = super(__UpperCamelCase , self ).outputs if self.use_past: A, A = self.num_layers for i in range(__UpperCamelCase ): A = {0: "batch", 2: "past_sequence + sequence"} A = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ): A = self._generate_dummy_inputs_for_encoder_and_decoder( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # Generate decoder inputs A = seq_length if not self.use_past else 1 A = self._generate_dummy_inputs_for_encoder_and_decoder( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) A = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} A = dict(**__UpperCamelCase , **__UpperCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch A, A = common_inputs["input_ids"].shape A = common_inputs["decoder_input_ids"].shape[1] A, A = self.num_attention_heads A = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) A = decoder_seq_length + 3 A = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) A = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase )] , dim=1 ) A = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered A, A = self.num_layers A = min(__UpperCamelCase , __UpperCamelCase ) A = max(__UpperCamelCase , __UpperCamelCase ) - min_num_layers A = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(__UpperCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase ), ) ) # TODO: test this. A = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(__UpperCamelCase , __UpperCamelCase ): common_inputs["past_key_values"].append((torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) ) return common_inputs def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ): A = self._generate_dummy_inputs_for_encoder_and_decoder( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch A, A = common_inputs["input_ids"].shape # Not using the same length for past_key_values A = seqlen + 2 A, A = self.num_layers A, A = self.num_attention_heads A = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) A = common_inputs["attention_mask"].dtype A = torch.cat( [common_inputs["attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase , dtype=__UpperCamelCase )] , dim=1 ) A = [ (torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) for _ in range(__UpperCamelCase ) ] return common_inputs def lowerCamelCase ( self :Tuple , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX A = compute_effective_axis_dimension( __UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX A = tokenizer.num_special_tokens_to_add(__UpperCamelCase ) A = compute_effective_axis_dimension( __UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCamelCase ) # Generate dummy inputs according to compute batch and sequence A = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size A = dict(tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase ) ) return common_inputs def lowerCamelCase ( self :List[Any] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ): if self.task in ["default", "seq2seq-lm"]: A = 
self._generate_dummy_inputs_for_default_and_seqaseq_lm( __UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase ) else: A = self._generate_dummy_inputs_for_causal_lm( __UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase ) return common_inputs def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str] , __UpperCamelCase :str , __UpperCamelCase :str ): if self.task in ["default", "seq2seq-lm"]: A = super()._flatten_past_key_values_(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) else: A = super(__UpperCamelCase , self )._flatten_past_key_values_( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) @property def lowerCamelCase ( self :List[str] ): return 1e-4
292
1
"""simple docstring""" import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger _snake_case : List[str] = get_logger(__name__) _snake_case : List[Any] = r'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n' class _UpperCAmelCase : @add_start_docstrings(__UpperCamelCase ) def __call__( self :int , __UpperCamelCase :jnp.ndarray , __UpperCamelCase :jnp.ndarray ): raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) class _UpperCAmelCase : @add_start_docstrings(__UpperCamelCase ) def __call__( self :Optional[Any] , __UpperCamelCase :jnp.ndarray , __UpperCamelCase :jnp.ndarray ): raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." 
) class _UpperCAmelCase ( lowercase_ ): @add_start_docstrings(__UpperCamelCase ) def __call__( self :str , __UpperCamelCase :jnp.ndarray , __UpperCamelCase :jnp.ndarray , __UpperCamelCase :int , **__UpperCamelCase :Any ): for processor in self: A = inspect.signature(processor.__call__ ).parameters if len(__UpperCamelCase ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( f"Make sure that all the required parameters: {list(function_args.keys() )} for " f"{processor.__class__} are passed to the logits processor." ) A = processor(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ) else: A = processor(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) return scores class _UpperCAmelCase ( lowercase_ ): def __init__( self :Tuple , __UpperCamelCase :float ): if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not (temperature > 0): raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}" ) A = temperature def __call__( self :Optional[Any] , __UpperCamelCase :jnp.ndarray , __UpperCamelCase :jnp.ndarray , __UpperCamelCase :int ): A = scores / self.temperature return scores class _UpperCAmelCase ( lowercase_ ): def __init__( self :List[Any] , __UpperCamelCase :float , __UpperCamelCase :float = -float("Inf" ) , __UpperCamelCase :int = 1 ): if not isinstance(__UpperCamelCase , __UpperCamelCase ) or (top_p < 0 or top_p > 1.0): raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}" ) if not isinstance(__UpperCamelCase , __UpperCamelCase ) or (min_tokens_to_keep < 1): raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}" ) A = top_p A = filter_value A = min_tokens_to_keep def __call__( self :Dict , __UpperCamelCase :jnp.ndarray , __UpperCamelCase :jnp.ndarray , __UpperCamelCase :int ): A, A = lax.top_k(__UpperCamelCase , scores.shape[-1] ) A = jnp.full_like(__UpperCamelCase , self.filter_value ) A = 
jax.nn.softmax(__UpperCamelCase , axis=-1 ).cumsum(axis=-1 ) A = cumulative_probs < self.top_p # include the token that is higher than top_p as well A = jnp.roll(__UpperCamelCase , 1 ) score_mask |= score_mask.at[:, 0].set(__UpperCamelCase ) # min tokens to keep A = score_mask.at[:, : self.min_tokens_to_keep].set(__UpperCamelCase ) A = jnp.where(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) A = jax.lax.sort_key_val(__UpperCamelCase , __UpperCamelCase )[-1] return next_scores class _UpperCAmelCase ( lowercase_ ): def __init__( self :Any , __UpperCamelCase :int , __UpperCamelCase :float = -float("Inf" ) , __UpperCamelCase :int = 1 ): if not isinstance(__UpperCamelCase , __UpperCamelCase ) or top_k <= 0: raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}" ) A = max(__UpperCamelCase , __UpperCamelCase ) A = filter_value def __call__( self :Tuple , __UpperCamelCase :jnp.ndarray , __UpperCamelCase :jnp.ndarray , __UpperCamelCase :int ): A, A = scores.shape A = jnp.full(batch_size * vocab_size , self.filter_value ) A = min(self.top_k , scores.shape[-1] ) # Safety check A, A = lax.top_k(__UpperCamelCase , __UpperCamelCase ) A = jnp.broadcast_to((jnp.arange(__UpperCamelCase ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() A = topk_scores.flatten() A = topk_indices.flatten() + shift A = next_scores_flat.at[topk_indices_flat].set(__UpperCamelCase ) A = next_scores_flat.reshape(__UpperCamelCase , __UpperCamelCase ) return next_scores class _UpperCAmelCase ( lowercase_ ): def __init__( self :int , __UpperCamelCase :int ): A = bos_token_id def __call__( self :List[Any] , __UpperCamelCase :jnp.ndarray , __UpperCamelCase :jnp.ndarray , __UpperCamelCase :int ): A = jnp.full(scores.shape , -float("inf" ) ) A = 1 - jnp.bool_(cur_len - 1 ) A = jnp.where(__UpperCamelCase , new_scores.at[:, self.bos_token_id].set(0 ) , __UpperCamelCase ) return scores class _UpperCAmelCase ( lowercase_ ): def __init__( self :Tuple , __UpperCamelCase :int 
, __UpperCamelCase :int ): A = max_length A = eos_token_id def __call__( self :List[Any] , __UpperCamelCase :jnp.ndarray , __UpperCamelCase :jnp.ndarray , __UpperCamelCase :int ): A = jnp.full(scores.shape , -float("inf" ) ) A = 1 - jnp.bool_(cur_len - self.max_length + 1 ) A = jnp.where(__UpperCamelCase , new_scores.at[:, self.eos_token_id].set(0 ) , __UpperCamelCase ) return scores class _UpperCAmelCase ( lowercase_ ): def __init__( self :Dict , __UpperCamelCase :int , __UpperCamelCase :int ): if not isinstance(__UpperCamelCase , __UpperCamelCase ) or min_length < 0: raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}" ) if not isinstance(__UpperCamelCase , __UpperCamelCase ) or eos_token_id < 0: raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}" ) A = min_length A = eos_token_id def __call__( self :Any , __UpperCamelCase :jnp.ndarray , __UpperCamelCase :jnp.ndarray , __UpperCamelCase :int ): # create boolean flag to decide if min length penalty should be applied A = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) A = jnp.where(__UpperCamelCase , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , __UpperCamelCase ) return scores class _UpperCAmelCase ( lowercase_ ): def __init__( self :Union[str, Any] , __UpperCamelCase :Dict , __UpperCamelCase :Dict ): A = list(__UpperCamelCase ) A = begin_index def __call__( self :Optional[int] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[str] , __UpperCamelCase :int ): A = 1 - jnp.bool_(cur_len - self.begin_index ) A = jnp.where(__UpperCamelCase , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , __UpperCamelCase ) return scores class _UpperCAmelCase ( lowercase_ ): def __init__( self :List[str] , __UpperCamelCase :list ): A = list(__UpperCamelCase ) def __call__( self :str , __UpperCamelCase :jnp.ndarray , __UpperCamelCase :jnp.ndarray , __UpperCamelCase :int ): A = scores.at[..., self.suppress_tokens].set(-float("inf" ) ) 
return scores class _UpperCAmelCase ( lowercase_ ): def __init__( self :Dict , __UpperCamelCase :List[str] ): A = dict(__UpperCamelCase ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. A = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: A = force_token_array.at[index].set(__UpperCamelCase ) A = jnp.intaa(__UpperCamelCase ) def __call__( self :Tuple , __UpperCamelCase :jnp.ndarray , __UpperCamelCase :jnp.ndarray , __UpperCamelCase :int ): def _force_token(__UpperCamelCase :Any ): A = scores.shape[0] A = self.force_token_array[generation_idx] A = jnp.ones_like(__UpperCamelCase , dtype=scores.dtype ) * -float("inf" ) A = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) A = lax.dynamic_update_slice(__UpperCamelCase , __UpperCamelCase , (0, current_token) ) return new_scores A = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(__UpperCamelCase ) , lambda: scores , ) , ) return scores class _UpperCAmelCase ( lowercase_ ): def __init__( self :Optional[int] , __UpperCamelCase :Dict , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Optional[Any] ): A = generate_config.eos_token_id A = generate_config.no_timestamps_token_id A = generate_config.no_timestamps_token_id + 1 A = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(__UpperCamelCase , "max_initial_timestamp_index" ): A = generate_config.max_initial_timestamp_index else: A = model_config.vocab_size if self.max_initial_timestamp_index is None: A = model_config.vocab_size def __call__( self :Any , __UpperCamelCase :str , 
__UpperCamelCase :Any , __UpperCamelCase :int ): # suppress <|notimestamps|> which is handled by without_timestamps A = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) ) def handle_pairs(__UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[Any] ): A = jnp.where((cur_len - self.begin_index) >= 1 , __UpperCamelCase , __UpperCamelCase ) A = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , __UpperCamelCase , ) A = jnp.where((cur_len - self.begin_index) < 2 , __UpperCamelCase , __UpperCamelCase ) A = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , __UpperCamelCase , __UpperCamelCase , ) return jnp.where( __UpperCamelCase , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , __UpperCamelCase , ) A = jax.vmap(__UpperCamelCase )(__UpperCamelCase , __UpperCamelCase ) A = jnp.where(cur_len == self.begin_index , __UpperCamelCase , __UpperCamelCase ) A = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , __UpperCamelCase , ) A = self.timestamp_begin + self.max_initial_timestamp_index A = jnp.where( __UpperCamelCase , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , __UpperCamelCase , ) # if sum of probability over timestamps is above any other token, sample timestamp A = jax.nn.log_softmax(__UpperCamelCase , axis=-1 ) def handle_cumulative_probs(__UpperCamelCase :Dict , __UpperCamelCase :Optional[int] ): A = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) A = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , __UpperCamelCase , ) A = jax.vmap(__UpperCamelCase )(__UpperCamelCase , __UpperCamelCase ) return scores
292
"""Check whether a graph is bipartite using depth-first 2-coloring.

A graph is bipartite when its vertices can be divided into two disjoint
sets U and V such that every edge connects a vertex of U with a vertex
of V (equivalently: the graph is 2-colorable, i.e. has no odd cycle).
"""


def check_bipartite_dfs(graph):
    """Return True iff *graph* is bipartite.

    Args:
        graph: adjacency list mapping each vertex ``0 .. len(graph) - 1``
            to a list of its neighbours, e.g. ``{0: [1], 1: [0]}``.

    Returns:
        bool: True when a valid 2-coloring exists, False otherwise.
    """
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(vertex, c):
        # Paint *vertex* with color ``c``; unvisited neighbours get ``1 - c``.
        visited[vertex] = True
        color[vertex] = c
        for neighbour in graph[vertex]:
            if not visited[neighbour]:
                dfs(neighbour, 1 - c)

    # Color every connected component (the graph may be disconnected).
    for vertex in range(len(graph)):
        if not visited[vertex]:
            dfs(vertex, 0)

    # The coloring is valid iff no edge joins two same-colored endpoints.
    return all(
        color[vertex] != color[neighbour]
        for vertex in range(len(graph))
        for neighbour in graph[vertex]
    )


# Adjacency list of graph (vertex 4 is isolated).
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
292
1
"""simple docstring""" import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class _UpperCAmelCase : def __init__( self :List[Any] , __UpperCamelCase :str , __UpperCamelCase :Tuple=13 , __UpperCamelCase :List[str]=7 , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :List[str]=True , __UpperCamelCase :Union[str, Any]=False , __UpperCamelCase :Dict=True , __UpperCamelCase :str=99 , __UpperCamelCase :Any=64 , __UpperCamelCase :int=5 , __UpperCamelCase :Dict=4 , __UpperCamelCase :Tuple=64 , __UpperCamelCase :Union[str, Any]="gelu" , __UpperCamelCase :List[str]=0.1 , __UpperCamelCase :Optional[Any]=0.1 , __UpperCamelCase :List[Any]=5_12 , __UpperCamelCase :List[Any]=16 , __UpperCamelCase :str=2 , __UpperCamelCase :str=0.02 , __UpperCamelCase :Dict=3 , __UpperCamelCase :Optional[int]=4 , __UpperCamelCase :Dict=None , ): A = parent A = batch_size A = seq_length A = is_training A = use_input_mask A = use_token_type_ids A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = type_sequence_label_size A = initializer_range A = num_labels A = num_choices A = scope def lowerCamelCase ( self :str ): return MPNetConfig.from_pretrained("microsoft/mpnet-base" ) def lowerCamelCase ( self :Dict ): A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A = None if self.use_input_mask: A = 
random_attention_mask([self.batch_size, self.seq_length] ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A = ids_tensor([self.batch_size] , self.num_choices ) A = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase ( self :Union[str, Any] ): return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def lowerCamelCase ( self :str , __UpperCamelCase :Any , __UpperCamelCase :Dict , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Tuple , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[str] ): A = MPNetModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = model(__UpperCamelCase , __UpperCamelCase ) A = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Any , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Any , __UpperCamelCase :int , __UpperCamelCase :Dict , __UpperCamelCase :List[Any] ): A = MPNetForQuestionAnswering(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = model( __UpperCamelCase , attention_mask=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) 
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Dict , __UpperCamelCase :List[Any] , __UpperCamelCase :List[Any] , __UpperCamelCase :str , __UpperCamelCase :Tuple , __UpperCamelCase :List[Any] ): A = self.num_labels A = MPNetForSequenceClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self :Dict , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Any , __UpperCamelCase :List[Any] , __UpperCamelCase :Dict , __UpperCamelCase :Optional[Any] , __UpperCamelCase :int ): A = self.num_choices A = MPNetForMultipleChoice(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A = model( __UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase ( self :str , __UpperCamelCase :Optional[int] , __UpperCamelCase :int , __UpperCamelCase :Tuple , __UpperCamelCase :List[str] , __UpperCamelCase :Any , __UpperCamelCase :Optional[int] ): A = self.num_labels A = MPNetForTokenClassification(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase ( self :Optional[int] ): A = self.prepare_config_and_inputs() ((A), (A), (A), (A), (A), (A)) = config_and_inputs A = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _UpperCAmelCase 
( lowercase_ , lowercase_ , unittest.TestCase ): UpperCamelCase = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) UpperCamelCase = ( { '''feature-extraction''': MPNetModel, '''fill-mask''': MPNetForMaskedLM, '''question-answering''': MPNetForQuestionAnswering, '''text-classification''': MPNetForSequenceClassification, '''token-classification''': MPNetForTokenClassification, '''zero-shot''': MPNetForSequenceClassification, } if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = True def lowerCamelCase ( self :Tuple ): A = MPNetModelTester(self ) A = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 ) def lowerCamelCase ( self :Optional[int] ): self.config_tester.run_common_tests() def lowerCamelCase ( self :Tuple ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*__UpperCamelCase ) def lowerCamelCase ( self :Union[str, Any] ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*__UpperCamelCase ) def lowerCamelCase ( self :List[str] ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*__UpperCamelCase ) def lowerCamelCase ( self :int ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*__UpperCamelCase ) def lowerCamelCase ( self :str ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*__UpperCamelCase ) @require_torch class _UpperCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase ( self :List[Any] ): A = MPNetModel.from_pretrained("microsoft/mpnet-base" ) A = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) A = model(__UpperCamelCase )[0] A = 
torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , __UpperCamelCase ) A = torch.tensor( [[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 ) )
292
"""Distribution-output utilities built on ``torch.distributions``.

NOTE(review): this module appears machine-obfuscated and cannot run as
written: every assignment targets the name ``A`` while later code reads
other names (``self.loc``, ``self.args_dim``, ``df``, ``logits``, ...);
several ``def`` signatures repeat the parameter name ``__UpperCamelCase``
(a SyntaxError); every class is named ``_UpperCAmelCase`` so later
definitions shadow earlier ones; the base ``lowercase_`` and the helpers
``AffineTransformed`` / ``ParameterProjection`` / ``LambdaLayer`` are
never defined here.  Confirm against the canonical upstream source
before relying on any behavior documented below.
"""
from typing import Callable, Dict, Optional, Tuple

import torch
from torch import nn
from torch.distributions import (
    AffineTransform,
    Distribution,
    Independent,
    NegativeBinomial,
    Normal,
    StudentT,
    TransformedDistribution,
)


class _UpperCAmelCase ( lowercase_ ):
    # Affine-transformed distribution: y = loc + scale * x.
    # NOTE(review): base ``lowercase_`` is undefined; presumably it stands
    # for ``TransformedDistribution`` (imported above) — confirm.

    def __init__( self :int , __UpperCamelCase :Distribution , __UpperCamelCase :Dict=None , __UpperCamelCase :Optional[int]=None , __UpperCamelCase :List[str]=0 ):
        # Defaults: scale -> 1.0 and loc -> 0.0 when not provided.
        # NOTE(review): assignments target ``A`` but ``self.loc``/``self.scale``
        # are read below — broken as written.
        A = 1.0 if scale is None else scale
        A = 0.0 if loc is None else loc
        super().__init__(__UpperCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__UpperCamelCase )] )

    @property
    def lowerCamelCase ( self :Any ):
        # Mean of the affine transform: E[y] = scale * E[x] + loc.
        return self.base_dist.mean * self.scale + self.loc

    @property
    def lowerCamelCase ( self :Optional[int] ):
        # Variance scales quadratically: Var[y] = scale^2 * Var[x].
        return self.base_dist.variance * self.scale**2

    @property
    def lowerCamelCase ( self :Dict ):
        # Standard deviation derived from the variance property above.
        return self.variance.sqrt()


class _UpperCAmelCase ( nn.Module ):
    # One linear projection per distribution argument, then a domain map
    # that constrains each raw output to its argument's valid domain.

    def __init__( self :Dict , __UpperCamelCase :int , __UpperCamelCase :Dict[str, int] , __UpperCamelCase :Callable[..., Tuple[torch.Tensor]] , **__UpperCamelCase :str ):
        super().__init__(**__UpperCamelCase )
        A = args_dim
        A = nn.ModuleList([nn.Linear(__UpperCamelCase , __UpperCamelCase ) for dim in args_dim.values()] )
        A = domain_map

    def lowerCamelCase ( self :int , __UpperCamelCase :torch.Tensor ):
        # Apply every projection to the input, then map into valid domains.
        A = [proj(__UpperCamelCase ) for proj in self.proj]
        return self.domain_map(*__UpperCamelCase )


class _UpperCAmelCase ( nn.Module ):
    # Wraps a plain callable as an nn.Module (lambda layer).

    def __init__( self :Dict , __UpperCamelCase :int ):
        super().__init__()
        A = function

    def lowerCamelCase ( self :List[str] , __UpperCamelCase :Any , *__UpperCamelCase :Any ):
        return self.function(__UpperCamelCase , *__UpperCamelCase )


class _UpperCAmelCase :
    # Abstract base describing how network outputs parameterize a
    # torch.distributions distribution.
    # NOTE(review): the three ``42`` values are placeholder class-attribute
    # slots; presumably args_dim / distribution_class (and one more) — confirm.
    UpperCamelCase = 42
    UpperCamelCase = 42
    UpperCamelCase = 42

    def __init__( self :Any , __UpperCamelCase :int = 1 ):
        # dim > 1 multiplies every argument dimension (multivariate output).
        A = dim
        A = {k: dim * self.args_dim[k] for k in self.args_dim}

    def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Dict ):
        # Wrap in Independent for dim > 1 so the last axis is an event dim.
        if self.dim == 1:
            return self.distribution_class(*__UpperCamelCase )
        else:
            return Independent(self.distribution_class(*__UpperCamelCase ) , 1 )

    def lowerCamelCase ( self :int , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None , ):
        # Optionally affine-transform the base distribution by loc/scale.
        # NOTE(review): ``AffineTransformed`` is not defined in this file.
        A = self._base_distribution(__UpperCamelCase )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(__UpperCamelCase , loc=__UpperCamelCase , scale=__UpperCamelCase , event_dim=self.event_dim )

    @property
    def lowerCamelCase ( self :List[Any] ):
        # Event shape: scalar () for dim == 1, else a length-1 tuple (dim,).
        return () if self.dim == 1 else (self.dim,)

    @property
    def lowerCamelCase ( self :Tuple ):
        return len(self.event_shape )

    @property
    def lowerCamelCase ( self :int ):
        # Default support lower bound.
        return 0.0

    def lowerCamelCase ( self :str , __UpperCamelCase :int ):
        # Build the projection head for this output's arguments.
        # NOTE(review): ``ParameterProjection``/``LambdaLayer`` are not
        # defined in this file (obfuscated away above).
        return ParameterProjection(
            in_features=__UpperCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )

    def lowerCamelCase ( self :List[Any] , *__UpperCamelCase :torch.Tensor ):
        # Subclasses must map raw projections into valid parameter domains.
        raise NotImplementedError()

    @staticmethod
    def lowerCamelCase ( __UpperCamelCase :torch.Tensor ):
        # Squareplus: smooth map of R onto (0, inf), like softplus but
        # algebraic.  NOTE(review): reads undefined name ``x`` as written.
        return (x + torch.sqrt(torch.square(__UpperCamelCase ) + 4.0 )) / 2.0


class _UpperCAmelCase ( lowercase_ ):
    # Student-T output: args df > 2 region enforced, scale > 0.
    UpperCamelCase = {"df": 1, "loc": 1, "scale": 1}
    UpperCamelCase = StudentT

    @classmethod
    def lowerCamelCase ( cls :List[str] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ):
        # scale forced strictly positive (clamped to dtype eps); df >= 2.
        A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
        A = 2.0 + cls.squareplus(__UpperCamelCase )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )


class _UpperCAmelCase ( lowercase_ ):
    # Normal output: scale constrained strictly positive.
    UpperCamelCase = {"loc": 1, "scale": 1}
    UpperCamelCase = Normal

    @classmethod
    def lowerCamelCase ( cls :List[Any] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ):
        A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )


class _UpperCAmelCase ( lowercase_ ):
    # Negative-binomial output parameterized by (total_count, logits).
    UpperCamelCase = {"total_count": 1, "logits": 1}
    UpperCamelCase = NegativeBinomial

    @classmethod
    def lowerCamelCase ( cls :str , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ):
        # total_count mapped to (0, inf); logits left unconstrained.
        A = cls.squareplus(__UpperCamelCase )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )

    def lowerCamelCase ( self :Tuple , __UpperCamelCase :List[str] ):
        # NegativeBinomial takes keyword args, so override the base builder.
        A, A = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase )
        else:
            return Independent(self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase ) , 1 )

    def lowerCamelCase ( self :List[str] , __UpperCamelCase :str , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None ):
        # Scaling is folded into the logits rather than an affine transform.
        A, A = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
292
1
"""simple docstring""" import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class _UpperCAmelCase ( lowercase_ , unittest.TestCase ): UpperCamelCase = BlenderbotSmallTokenizer UpperCamelCase = False def lowerCamelCase ( self :int ): super().setUp() A = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"] A = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) ) A = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""] A = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"} A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__UpperCamelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__UpperCamelCase ) ) def lowerCamelCase ( self :Union[str, Any] , **__UpperCamelCase :List[str] ): kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def lowerCamelCase ( self :Tuple , __UpperCamelCase :Any ): A = "adapt act apte" A = "adapt act apte" return input_text, output_text def lowerCamelCase ( self :str ): A = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) A = "adapt act apte" A = ["adapt", "act", "ap@@", "te"] A = tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] A = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase ) def lowerCamelCase ( self :Dict ): A = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) assert 
tok("sam" ).input_ids == [13_84] A = "I am a small frog." A = tok([src_text] , padding=__UpperCamelCase , truncation=__UpperCamelCase )["input_ids"] A = tok.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def lowerCamelCase ( self :List[Any] ): A = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) A = "I am a small frog ." A = "." A = tok(__UpperCamelCase )["input_ids"] A = tok(__UpperCamelCase )["input_ids"] assert encoded[-1] == encoded_dot[0]
292
"""simple docstring""" import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class _UpperCAmelCase : UpperCamelCase = None def lowerCamelCase ( self :List[Any] ): A = self.feature_extraction_class(**self.feat_extract_dict ) A = json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , __UpperCamelCase ) def lowerCamelCase ( self :Dict ): A = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A = os.path.join(__UpperCamelCase , "feat_extract.json" ) feat_extract_first.to_json_file(__UpperCamelCase ) A = self.feature_extraction_class.from_json_file(__UpperCamelCase ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def lowerCamelCase ( self :Dict ): A = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A = feat_extract_first.save_pretrained(__UpperCamelCase )[0] check_json_file_has_correct_format(__UpperCamelCase ) A = self.feature_extraction_class.from_pretrained(__UpperCamelCase ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def lowerCamelCase ( self :Tuple ): A = self.feature_extraction_class() self.assertIsNotNone(__UpperCamelCase )
292
1
"""simple docstring""" import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class _UpperCAmelCase : def __init__( self :int , __UpperCamelCase :int , __UpperCamelCase :Any=13 , __UpperCamelCase :Optional[int]=7 , __UpperCamelCase :Optional[Any]=True , __UpperCamelCase :List[str]=True , __UpperCamelCase :Any=True , __UpperCamelCase :Any=True , __UpperCamelCase :Union[str, Any]=99 , __UpperCamelCase :Union[str, Any]=32 , __UpperCamelCase :Union[str, Any]=5 , __UpperCamelCase :int=4 , __UpperCamelCase :Any=4 , __UpperCamelCase :str="gelu" , __UpperCamelCase :List[str]=0.0 , __UpperCamelCase :int=0.1 , __UpperCamelCase :Tuple=True , __UpperCamelCase :List[Any]=5_12 , __UpperCamelCase :Dict=16 , __UpperCamelCase :List[Any]=2 , __UpperCamelCase :Any=0.02 , __UpperCamelCase :Tuple=3 , __UpperCamelCase :Tuple=4 , __UpperCamelCase :List[str]=None , ): A = parent A = batch_size A = seq_length A = is_training A = use_input_mask A = use_token_type_ids A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_multiple_size A = hidden_act A = hidden_dropout A = attention_dropout A = weight_tying A = max_position_embeddings A = type_vocab_size A = type_sequence_label_size A = initializer_range A = num_labels A = num_choices A = scope def lowerCamelCase ( self :Dict ): A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A = None if self.use_input_mask: A = random_attention_mask([self.batch_size, 
self.seq_length] ) A = None if self.use_labels: A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A = self.get_config() return config, input_ids, input_mask, token_labels def lowerCamelCase ( self :Any ): return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , ) def lowerCamelCase ( self :str ): A, A, A, A = self.prepare_config_and_inputs() A = True return config, input_ids, input_mask, token_labels def lowerCamelCase ( self :Any , __UpperCamelCase :Optional[int] , __UpperCamelCase :Any , __UpperCamelCase :Optional[int] ): A = GPTNeoXJapaneseModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = model(__UpperCamelCase , attention_mask=__UpperCamelCase ) A = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Optional[int] , __UpperCamelCase :Any , __UpperCamelCase :int ): A = True A = GPTNeoXJapaneseModel(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = model(__UpperCamelCase , attention_mask=__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[Any] , __UpperCamelCase :Optional[Any] ): A = GPTNeoXJapaneseForCausalLM(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = 
model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase ( self :Dict , __UpperCamelCase :Optional[int] , __UpperCamelCase :str , __UpperCamelCase :Any ): A = True A = GPTNeoXJapaneseForCausalLM(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() # first forward pass A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , use_cache=__UpperCamelCase ) A = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A = ids_tensor((self.batch_size, 3) , config.vocab_size ) A = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and A = torch.cat([input_ids, next_tokens] , dim=-1 ) A = torch.cat([input_mask, next_mask] , dim=-1 ) A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , output_hidden_states=__UpperCamelCase ) A = output_from_no_past["hidden_states"][0] A = model( __UpperCamelCase , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )["hidden_states"][0] # select random slice A = ids_tensor((1,) , output_from_past.shape[-1] ).item() A = output_from_no_past[:, -3:, random_slice_idx].detach() A = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) ) def lowerCamelCase ( self :List[str] ): A = self.prepare_config_and_inputs() A, A, A, A = config_and_inputs A = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): UpperCamelCase = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () UpperCamelCase = 
(GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () UpperCamelCase = ( {'''feature-extraction''': GPTNeoXJapaneseModel, '''text-generation''': GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def lowerCamelCase ( self :Optional[Any] ): A = GPTNeoXJapaneseModelTester(self ) A = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 ) def lowerCamelCase ( self :str ): self.config_tester.run_common_tests() def lowerCamelCase ( self :Optional[Any] ): A, A, A, A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def lowerCamelCase ( self :Tuple ): A, A, A, A = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def lowerCamelCase ( self :Any ): # This regression test was failing with PyTorch < 1.3 A, A, A, A = self.model_tester.prepare_config_and_inputs_for_decoder() A = None self.model_tester.create_and_check_model_as_decoder(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def lowerCamelCase ( self :List[str] ): A, A, A, A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def lowerCamelCase ( self :Optional[Any] ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*__UpperCamelCase ) @slow def lowerCamelCase ( self :int ): A = "abeja/gpt-neox-japanese-2.7b" A = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"] A = [ "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。", "100年後に必要とされる会社は、「人」が中心の会社です。", "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。", "国境の長いトンネルを抜けると、そこは雪国だった。", "美味しい日本食といえば、やっぱりお寿司ですよね。", ] A = 
GPTNeoXJapaneseTokenizer.from_pretrained(__UpperCamelCase ) A = GPTNeoXJapaneseForCausalLM.from_pretrained(__UpperCamelCase ) A = [] for prompt in prompts: A = tokenizer(__UpperCamelCase , return_tensors="pt" ).input_ids A = model.generate(__UpperCamelCase , max_length=50 ) A = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase ) predicted_outputs += generated_string self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
292
"""simple docstring""" import unittest from transformers import RoFormerTokenizer, RoFormerTokenizerFast from transformers.testing_utils import require_rjieba, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_rjieba @require_tokenizers class _UpperCAmelCase ( lowercase_ , unittest.TestCase ): UpperCamelCase = RoFormerTokenizer UpperCamelCase = RoFormerTokenizerFast UpperCamelCase = True UpperCamelCase = True def lowerCamelCase ( self :List[str] ): super().setUp() def lowerCamelCase ( self :int , **__UpperCamelCase :List[Any] ): return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCamelCase ) def lowerCamelCase ( self :Tuple , **__UpperCamelCase :Optional[int] ): return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCamelCase ) def lowerCamelCase ( self :Any ): A = "永和服装饰品有限公司,今天天气非常好" A = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好" return input_text, output_text def lowerCamelCase ( self :int ): A = self.get_tokenizer() A, A = self.get_chinese_input_output_texts() A = tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , output_text.split() ) A = tokens + [tokenizer.unk_token] A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase ) def lowerCamelCase ( self :str ): A = self.get_rust_tokenizer() A, A = self.get_chinese_input_output_texts() A = tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , output_text.split() ) A = tokens + [tokenizer.unk_token] A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase ) def lowerCamelCase ( self :Any ): pass def lowerCamelCase ( self :Tuple ): pass def lowerCamelCase ( self :List[str] ): pass
292
1
"""simple docstring""" from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _UpperCAmelCase : def __init__( self :Optional[int] , __UpperCamelCase :List[str] , __UpperCamelCase :Union[str, Any]=13 , __UpperCamelCase :List[Any]=30 , __UpperCamelCase :List[str]=2 , __UpperCamelCase :int=3 , __UpperCamelCase :Dict=True , __UpperCamelCase :Optional[Any]=True , __UpperCamelCase :Any=32 , __UpperCamelCase :Dict=2 , __UpperCamelCase :Tuple=4 , __UpperCamelCase :Union[str, Any]=37 , __UpperCamelCase :List[str]="gelu" , __UpperCamelCase :Optional[int]=0.1 , __UpperCamelCase :Optional[int]=0.1 , __UpperCamelCase :Any=10 , __UpperCamelCase :Optional[int]=0.02 , __UpperCamelCase :str=3 , __UpperCamelCase :Optional[int]=None , ): A = parent A = batch_size A = image_size A = patch_size A = num_channels A = is_training A = use_labels A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = type_sequence_label_size A = initializer_range A = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A = (image_size // patch_size) ** 2 A = num_patches + 1 def lowerCamelCase ( self :int ): A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A = None if self.use_labels: A = ids_tensor([self.batch_size] , 
self.type_sequence_label_size ) A = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self :List[str] ): return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , ) def lowerCamelCase ( self :Dict , __UpperCamelCase :Optional[Any] , __UpperCamelCase :int , __UpperCamelCase :Union[str, Any] ): A = TFViTModel(config=__UpperCamelCase ) A = model(__UpperCamelCase , training=__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. A = self.image_size // 2 A = pixel_values[:, :, :image_size, :image_size] A = model(__UpperCamelCase , interpolate_pos_encoding=__UpperCamelCase , training=__UpperCamelCase ) A = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def lowerCamelCase ( self :str , __UpperCamelCase :Optional[Any] , __UpperCamelCase :int , __UpperCamelCase :List[str] ): A = self.type_sequence_label_size A = TFViTForImageClassification(__UpperCamelCase ) A = model(__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. 
A = self.image_size // 2 A = pixel_values[:, :, :image_size, :image_size] A = model(__UpperCamelCase , interpolate_pos_encoding=__UpperCamelCase , training=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A = 1 A = TFViTForImageClassification(__UpperCamelCase ) A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase ( self :Dict ): A = self.prepare_config_and_inputs() A, A, A = config_and_inputs A = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): UpperCamelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () UpperCamelCase = ( {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification} if is_tf_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def lowerCamelCase ( self :Union[str, Any] ): A = TFViTModelTester(self ) A = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 ) def lowerCamelCase ( self :Optional[int] ): self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds" ) def lowerCamelCase ( self :Any ): pass @unittest.skip(reason="ViT does not use inputs_embeds" ) def lowerCamelCase ( self :Optional[int] ): pass def lowerCamelCase ( self :List[str] ): A, A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(__UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCamelCase , tf.keras.layers.Layer ) ) def lowerCamelCase ( self :Union[str, Any] ): A, 
A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(__UpperCamelCase ) A = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A = [*signature.parameters.keys()] A = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def lowerCamelCase ( self :List[str] ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def lowerCamelCase ( self :Tuple ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase ) @slow def lowerCamelCase ( self :Any ): A = TFViTModel.from_pretrained("google/vit-base-patch16-224" ) self.assertIsNotNone(__UpperCamelCase ) def A__ ( ): A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class _UpperCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase ( self :Union[str, Any] ): return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None @slow def lowerCamelCase ( self :str ): A = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ) A = self.default_image_processor A = prepare_img() A = image_processor(images=__UpperCamelCase , return_tensors="tf" ) # forward pass A = model(**__UpperCamelCase ) # verify the logits A = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , __UpperCamelCase ) A = tf.constant([-0.2_744, 0.8_215, -0.0_836] ) tf.debugging.assert_near(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 )
292
"""simple docstring""" def A__ ( UpperCamelCase , UpperCamelCase = False ): if not isinstance(UpperCamelCase , UpperCamelCase ): A = F"Expected string as input, found {type(UpperCamelCase )}" raise ValueError(UpperCamelCase ) if not isinstance(UpperCamelCase , UpperCamelCase ): A = F"Expected boolean as use_pascal parameter, found {type(UpperCamelCase )}" raise ValueError(UpperCamelCase ) A = input_str.split("_" ) A = 0 if use_pascal else 1 A = words[start_index:] A = [word[0].upper() + word[1:] for word in words_to_capitalize] A = "" if use_pascal else words[0] return "".join([initial_word, *capitalized_words] ) if __name__ == "__main__": from doctest import testmod testmod()
292
1
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = 42 UpperCamelCase = 42 UpperCamelCase = None class _UpperCAmelCase ( lowercase_ , lowercase_ ): UpperCamelCase = 2 @register_to_config def __init__( self :Union[str, Any] , __UpperCamelCase :float = 0.02 , __UpperCamelCase :float = 1_00 , __UpperCamelCase :float = 1.007 , __UpperCamelCase :float = 80 , __UpperCamelCase :float = 0.05 , __UpperCamelCase :float = 50 , ): # standard deviation of the initial noise distribution A = sigma_max # setable values A = None A = None A = None # sigma(t_i) def lowerCamelCase ( self :List[str] , __UpperCamelCase :torch.FloatTensor , __UpperCamelCase :Optional[int] = None ): return sample def lowerCamelCase ( self :str , __UpperCamelCase :int , __UpperCamelCase :Union[str, torch.device] = None ): A = num_inference_steps A = np.arange(0 , self.num_inference_steps )[::-1].copy() A = torch.from_numpy(__UpperCamelCase ).to(__UpperCamelCase ) A = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] A = torch.tensor(__UpperCamelCase , dtype=torch.floataa , device=__UpperCamelCase ) def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :torch.FloatTensor , __UpperCamelCase :float , __UpperCamelCase :Optional[torch.Generator] = None ): if self.config.s_min <= sigma <= self.config.s_max: A = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: A = 0 # sample eps ~ N(0, S_noise^2 * I) A = self.config.s_noise * randn_tensor(sample.shape , generator=__UpperCamelCase ).to(sample.device ) A = sigma + gamma * sigma A = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return 
sample_hat, sigma_hat def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :torch.FloatTensor , __UpperCamelCase :float , __UpperCamelCase :float , __UpperCamelCase :torch.FloatTensor , __UpperCamelCase :bool = True , ): A = sample_hat + sigma_hat * model_output A = (sample_hat - pred_original_sample) / sigma_hat A = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=__UpperCamelCase , derivative=__UpperCamelCase , pred_original_sample=__UpperCamelCase ) def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :torch.FloatTensor , __UpperCamelCase :float , __UpperCamelCase :float , __UpperCamelCase :torch.FloatTensor , __UpperCamelCase :torch.FloatTensor , __UpperCamelCase :torch.FloatTensor , __UpperCamelCase :bool = True , ): A = sample_prev + sigma_prev * model_output A = (sample_prev - pred_original_sample) / sigma_prev A = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=__UpperCamelCase , derivative=__UpperCamelCase , pred_original_sample=__UpperCamelCase ) def lowerCamelCase ( self :Dict , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Optional[int] ): raise NotImplementedError()
292
"""simple docstring""" from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) _snake_case : int = logging.get_logger(__name__) # pylint: disable=invalid-name _snake_case : List[Any] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... 
).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... 
).images\n\n >>> images[0].save("robot_cat.png")\n ```\n' def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase=8 ): A = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 A = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class _UpperCAmelCase ( lowercase_ ): def __init__( self :Any , __UpperCamelCase :UNetaDConditionModel , __UpperCamelCase :DDPMScheduler , __UpperCamelCase :VQModel , ): super().__init__() self.register_modules( unet=__UpperCamelCase , scheduler=__UpperCamelCase , movq=__UpperCamelCase , ) A = 2 ** (len(self.movq.config.block_out_channels ) - 1) def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Tuple , __UpperCamelCase :Dict , __UpperCamelCase :Dict , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] , __UpperCamelCase :List[str] ): if latents is None: A = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=__UpperCamelCase , dtype=__UpperCamelCase ) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" ) A = latents.to(__UpperCamelCase ) A = latents * scheduler.init_noise_sigma return latents def lowerCamelCase ( self :Tuple , __UpperCamelCase :Any=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) A = torch.device(f"cuda:{gpu_id}" ) A = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(__UpperCamelCase , __UpperCamelCase ) def lowerCamelCase ( self :Dict , __UpperCamelCase :int=0 ): if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." 
) A = torch.device(f"cuda:{gpu_id}" ) if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=__UpperCamelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) A = None for cpu_offloaded_model in [self.unet, self.movq]: A, A = cpu_offload_with_hook(__UpperCamelCase , __UpperCamelCase , prev_module_hook=__UpperCamelCase ) # We'll offload the last model manually. A = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def lowerCamelCase ( self :str ): if not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(__UpperCamelCase , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(__UpperCamelCase ) def __call__( self :List[Any] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :torch.FloatTensor , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 1_00 , __UpperCamelCase :float = 4.0 , __UpperCamelCase :int = 1 , __UpperCamelCase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , ): A = self._execution_device A = guidance_scale > 1.0 if isinstance(__UpperCamelCase , __UpperCamelCase ): A = torch.cat(__UpperCamelCase , dim=0 ) if isinstance(__UpperCamelCase , __UpperCamelCase ): A = torch.cat(__UpperCamelCase , dim=0 ) if isinstance(__UpperCamelCase , __UpperCamelCase ): A = torch.cat(__UpperCamelCase , dim=0 ) A = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: A = 
image_embeds.repeat_interleave(__UpperCamelCase , dim=0 ) A = negative_image_embeds.repeat_interleave(__UpperCamelCase , dim=0 ) A = hint.repeat_interleave(__UpperCamelCase , dim=0 ) A = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase ) A = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase ) self.scheduler.set_timesteps(__UpperCamelCase , device=__UpperCamelCase ) A = self.scheduler.timesteps A = self.movq.config.latent_channels A, A = downscale_height_and_width(__UpperCamelCase , __UpperCamelCase , self.movq_scale_factor ) # create initial latent A = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , self.scheduler , ) for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents A = {"image_embeds": image_embeds, "hint": hint} A = self.unet( sample=__UpperCamelCase , timestep=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , added_cond_kwargs=__UpperCamelCase , return_dict=__UpperCamelCase , )[0] if do_classifier_free_guidance: A, A = noise_pred.split(latents.shape[1] , dim=1 ) A, A = noise_pred.chunk(2 ) A, A = variance_pred.chunk(2 ) A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) A = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , "variance_type" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): A, A = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 A = self.scheduler.step( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase , )[0] # post-processing A = self.movq.decode(__UpperCamelCase , force_not_quantize=__UpperCamelCase )["sample"] if 
output_type not in ["pt", "np", "pil"]: raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" ) if output_type in ["np", "pil"]: A = image * 0.5 + 0.5 A = image.clamp(0 , 1 ) A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": A = self.numpy_to_pil(__UpperCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__UpperCamelCase )
292
1
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = (UniPCMultistepScheduler,) UpperCamelCase = (('''num_inference_steps''', 2_5),) def lowerCamelCase ( self :Dict , **__UpperCamelCase :Dict ): A = { "num_train_timesteps": 10_00, "beta_start": 0.0_001, "beta_end": 0.02, "beta_schedule": "linear", "solver_order": 2, "solver_type": "bh2", } config.update(**__UpperCamelCase ) return config def lowerCamelCase ( self :Any , __UpperCamelCase :Optional[Any]=0 , **__UpperCamelCase :int ): A = dict(self.forward_default_kwargs ) A = kwargs.pop("num_inference_steps" , __UpperCamelCase ) A = self.dummy_sample A = 0.1 * sample A = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A = self.get_scheduler_config(**__UpperCamelCase ) A = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals A = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) A = scheduler_class.from_pretrained(__UpperCamelCase ) new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals A = dummy_past_residuals[: new_scheduler.config.solver_order] A, A = sample, sample for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ): A = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample A = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowerCamelCase ( self :List[Any] , __UpperCamelCase :List[str]=0 , **__UpperCamelCase :Any ): A 
= dict(self.forward_default_kwargs ) A = kwargs.pop("num_inference_steps" , __UpperCamelCase ) A = self.dummy_sample A = 0.1 * sample A = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A = self.get_scheduler_config() A = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals (must be after setting timesteps) A = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) A = scheduler_class.from_pretrained(__UpperCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residual (must be after setting timesteps) A = dummy_past_residuals[: new_scheduler.config.solver_order] A = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample A = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Tuple=None , **__UpperCamelCase :Dict ): if scheduler is None: A = self.scheduler_classes[0] A = self.get_scheduler_config(**__UpperCamelCase ) A = scheduler_class(**__UpperCamelCase ) A = self.scheduler_classes[0] A = self.get_scheduler_config(**__UpperCamelCase ) A = scheduler_class(**__UpperCamelCase ) A = 10 A = self.dummy_model() A = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): A = model(__UpperCamelCase , __UpperCamelCase ) A = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample return sample def lowerCamelCase ( self :Any ): A = dict(self.forward_default_kwargs ) A = kwargs.pop("num_inference_steps" , __UpperCamelCase ) for scheduler_class in self.scheduler_classes: A = 
self.get_scheduler_config() A = scheduler_class(**__UpperCamelCase ) A = self.dummy_sample A = 0.1 * sample if num_inference_steps is not None and hasattr(__UpperCamelCase , "set_timesteps" ): scheduler.set_timesteps(__UpperCamelCase ) elif num_inference_steps is not None and not hasattr(__UpperCamelCase , "set_timesteps" ): A = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A = [residual + 0.2, residual + 0.15, residual + 0.10] A = dummy_past_residuals[: scheduler.config.solver_order] A = scheduler.timesteps[5] A = scheduler.timesteps[6] A = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample A = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCamelCase ( self :str ): # make sure that iterating over schedulers with same config names gives same results # for defaults A = UniPCMultistepScheduler(**self.get_scheduler_config() ) A = self.full_loop(scheduler=__UpperCamelCase ) A = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_464 ) < 1e-3 A = DPMSolverSinglestepScheduler.from_config(scheduler.config ) A = DEISMultistepScheduler.from_config(scheduler.config ) A = DPMSolverMultistepScheduler.from_config(scheduler.config ) A = UniPCMultistepScheduler.from_config(scheduler.config ) A = self.full_loop(scheduler=__UpperCamelCase ) A = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_464 ) < 1e-3 def lowerCamelCase ( self :List[Any] ): for timesteps in [25, 50, 1_00, 9_99, 10_00]: self.check_over_configs(num_train_timesteps=__UpperCamelCase ) def lowerCamelCase ( self :Dict ): self.check_over_configs(thresholding=__UpperCamelCase ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", 
"sample"]: self.check_over_configs( thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , ) def lowerCamelCase ( self :Dict ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCamelCase ) def lowerCamelCase ( self :List[str] ): for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , ) A = self.full_loop( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , ) assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers" def lowerCamelCase ( self :Union[str, Any] ): self.check_over_configs(lower_order_final=__UpperCamelCase ) self.check_over_configs(lower_order_final=__UpperCamelCase ) def lowerCamelCase ( self :Optional[int] ): for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]: self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 ) def lowerCamelCase ( self :Optional[int] ): A = self.full_loop() A = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_464 ) < 1e-3 def lowerCamelCase ( self :Tuple ): A = self.full_loop(prediction_type="v_prediction" ) A = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.1_014 ) < 1e-3 def lowerCamelCase ( self :str ): A = self.scheduler_classes[0] A = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 ) A = scheduler_class(**__UpperCamelCase ) A = 10 A = self.dummy_model() A = self.dummy_sample_deter.half() scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): A = model(__UpperCamelCase , __UpperCamelCase ) A = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase 
).prev_sample assert sample.dtype == torch.floataa def lowerCamelCase ( self :str , **__UpperCamelCase :Optional[int] ): for scheduler_class in self.scheduler_classes: A = self.get_scheduler_config(**__UpperCamelCase ) A = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
292
"""simple docstring""" import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _UpperCAmelCase : def __init__( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str]=13 , __UpperCamelCase :Any=30 , __UpperCamelCase :int=2 , __UpperCamelCase :Union[str, Any]=3 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :List[str]=32 , __UpperCamelCase :List[Any]=5 , __UpperCamelCase :Dict=4 , __UpperCamelCase :List[str]=37 , __UpperCamelCase :str="gelu" , __UpperCamelCase :Union[str, Any]=0.1 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Tuple=10 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :int=None , ): A = parent A = batch_size A = image_size A = patch_size A = num_channels A = is_training A = use_labels A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = type_sequence_label_size A = initializer_range A = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A = (image_size // patch_size) ** 2 A = num_patches + 1 def lowerCamelCase ( self :Any ): A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A = None if 
self.use_labels: A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self :Union[str, Any] ): return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def lowerCamelCase ( self :Dict , __UpperCamelCase :Dict , __UpperCamelCase :Any , __UpperCamelCase :Any ): A = ViTMSNModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Optional[Any] ): A = self.type_sequence_label_size A = ViTMSNForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = model(__UpperCamelCase , labels=__UpperCamelCase ) print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" ) print("Labels: {labels}" ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A = 1 A = ViTMSNForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase ( self :Optional[Any] ): A = self.prepare_config_and_inputs() A, A, A = config_and_inputs A = {"pixel_values": pixel_values} return config, inputs_dict 
@require_torch class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () UpperCamelCase = ( {'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification} if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def lowerCamelCase ( self :Optional[int] ): A = ViTMSNModelTester(self ) A = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 ) def lowerCamelCase ( self :Any ): self.config_tester.run_common_tests() @unittest.skip(reason="ViTMSN does not use inputs_embeds" ) def lowerCamelCase ( self :Union[str, Any] ): pass def lowerCamelCase ( self :int ): A, A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(__UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) ) def lowerCamelCase ( self :Tuple ): A, A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(__UpperCamelCase ) A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A = [*signature.parameters.keys()] A = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def lowerCamelCase ( self :List[str] ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def lowerCamelCase ( self :Dict ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase ) @slow def lowerCamelCase ( self :List[Any] ): for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = 
ViTMSNModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def A__ ( ): A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _UpperCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase ( self :Union[str, Any] ): return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None @slow def lowerCamelCase ( self :Any ): torch.manual_seed(2 ) A = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(__UpperCamelCase ) A = self.default_image_processor A = prepare_img() A = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): A = model(**__UpperCamelCase ) # verify the logits A = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __UpperCamelCase ) A = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
292
1
"""simple docstring""" from __future__ import annotations def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , ): if (stress, tangential_force, area).count(0 ) != 1: raise ValueError("You cannot supply more or less than 2 values" ) elif stress < 0: raise ValueError("Stress cannot be negative" ) elif tangential_force < 0: raise ValueError("Tangential Force cannot be negative" ) elif area < 0: raise ValueError("Area cannot be negative" ) elif stress == 0: return ( "stress", tangential_force / area, ) elif tangential_force == 0: return ( "tangential_force", stress * area, ) else: return ( "area", tangential_force / stress, ) if __name__ == "__main__": import doctest doctest.testmod()
292
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case : Optional[int] = logging.get_logger(__name__) _snake_case : Optional[int] = { 'google/vivit-b-16x2-kinetics400': ( 'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json' ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = '''vivit''' def __init__( self :Optional[Any] , __UpperCamelCase :Dict=2_24 , __UpperCamelCase :int=32 , __UpperCamelCase :Union[str, Any]=[2, 16, 16] , __UpperCamelCase :Optional[Any]=3 , __UpperCamelCase :Optional[Any]=7_68 , __UpperCamelCase :Any=12 , __UpperCamelCase :List[str]=12 , __UpperCamelCase :List[str]=30_72 , __UpperCamelCase :Any="gelu_fast" , __UpperCamelCase :List[Any]=0.0 , __UpperCamelCase :str=0.0 , __UpperCamelCase :Dict=0.02 , __UpperCamelCase :Optional[Any]=1e-06 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ): A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = num_frames A = tubelet_size A = num_channels A = qkv_bias super().__init__(**__UpperCamelCase )
292
1
"""simple docstring""" _snake_case : Optional[int] = [ 'DownloadConfig', 'DownloadManager', 'DownloadMode', 'StreamingDownloadManager', ] from .download_config import DownloadConfig from .download_manager import DownloadManager, DownloadMode from .streaming_download_manager import StreamingDownloadManager
292
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _UpperCAmelCase ( lowercase_ , unittest.TestCase ): UpperCamelCase = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Union[str, Any]=0 ): A = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(__UpperCamelCase ) ) A = np.random.RandomState(__UpperCamelCase ) A = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def lowerCamelCase ( self :Any ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Dict ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert 
image.shape == (1, 1_28, 1_28, 3) A = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Optional[Any] ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) # warmup pass to apply optimizations A = pipe(**self.get_dummy_inputs() ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Dict ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Optional[Any] ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - 
expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Union[str, Any] ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): @property def lowerCamelCase ( self :Optional[Any] ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowerCamelCase ( self :Optional[int] ): A = ort.SessionOptions() A = False return options def lowerCamelCase ( self :Dict ): A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) A = init_image.resize((7_68, 5_12) ) # using the PNDM scheduler by default A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = "A fantasy landscape, trending on artstation" A = np.random.RandomState(0 ) A = pipe( prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , ) A = output.images A = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) A = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] ) # 
TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def lowerCamelCase ( self :Any ): A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) A = init_image.resize((7_68, 5_12) ) A = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" ) A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = "A fantasy landscape, trending on artstation" A = np.random.RandomState(0 ) A = pipe( prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , ) A = output.images A = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) A = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
292
1
"""simple docstring""" import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class _UpperCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self :List[Any] ): # A mock response for an HTTP head request to emulate server down A = mock.Mock() A = 5_00 A = {} A = HTTPError A = {} # Download this model to make sure it's in the cache. A = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request" , return_value=__UpperCamelCase ) as mock_head: A = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def lowerCamelCase ( self :Optional[Any] ): # A mock response for an HTTP head request to emulate server down A = mock.Mock() A = 5_00 A = {} A = HTTPError A = {} # Download this model to make sure it's in the cache. A = GPTaTokenizerFast.from_pretrained("gpt2" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch("requests.Session.request" , return_value=__UpperCamelCase ) as mock_head: A = GPTaTokenizerFast.from_pretrained("gpt2" ) # This check we did call the fake head request mock_head.assert_called() def lowerCamelCase ( self :Tuple ): # This test is for deprecated behavior and can be removed in v5 try: A = tempfile.mktemp() with open(__UpperCamelCase , "wb" ) as f: http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" , __UpperCamelCase ) A = AlbertTokenizer.from_pretrained(__UpperCamelCase ) finally: os.remove(__UpperCamelCase ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile("tokenizer.json" ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open("tokenizer.json" , "wb" ) as f: http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" , __UpperCamelCase ) A = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size , 10_00 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. 
finally: os.remove("tokenizer.json" ) def lowerCamelCase ( self :Union[str, Any] ): # This test is for deprecated behavior and can be removed in v5 A = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ) @is_staging_test class _UpperCAmelCase ( unittest.TestCase ): UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def lowerCamelCase ( cls :Tuple ): A = TOKEN HfFolder.save_token(__UpperCamelCase ) @classmethod def lowerCamelCase ( cls :Any ): try: delete_repo(token=cls._token , repo_id="test-tokenizer" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" ) except HTTPError: pass def lowerCamelCase ( self :int ): with tempfile.TemporaryDirectory() as tmp_dir: A = os.path.join(__UpperCamelCase , "vocab.txt" ) with open(__UpperCamelCase , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) A = BertTokenizer(__UpperCamelCase ) tokenizer.push_to_hub("test-tokenizer" , use_auth_token=self._token ) A = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id="test-tokenizer" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__UpperCamelCase , repo_id="test-tokenizer" , push_to_hub=__UpperCamelCase , use_auth_token=self._token ) A = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) def lowerCamelCase ( self :Optional[int] ): with tempfile.TemporaryDirectory() as tmp_dir: A = os.path.join(__UpperCamelCase , "vocab.txt" ) with open(__UpperCamelCase , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" 
for x in self.vocab_tokens] ) ) A = BertTokenizer(__UpperCamelCase ) tokenizer.push_to_hub("valid_org/test-tokenizer-org" , use_auth_token=self._token ) A = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-tokenizer-org" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( __UpperCamelCase , repo_id="valid_org/test-tokenizer-org" , push_to_hub=__UpperCamelCase , use_auth_token=self._token ) A = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) @require_tokenizers def lowerCamelCase ( self :str ): CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: A = os.path.join(__UpperCamelCase , "vocab.txt" ) with open(__UpperCamelCase , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) A = CustomTokenizer(__UpperCamelCase ) # No fast custom tokenizer tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token ) A = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer" , trust_remote_code=__UpperCamelCase ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: A = os.path.join(__UpperCamelCase , "vocab.txt" ) with open(__UpperCamelCase , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) A = BertTokenizerFast.from_pretrained(__UpperCamelCase ) bert_tokenizer.save_pretrained(__UpperCamelCase ) A = CustomTokenizerFast.from_pretrained(__UpperCamelCase ) 
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token ) A = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer" , trust_remote_code=__UpperCamelCase ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizerFast" ) A = AutoTokenizer.from_pretrained( f"{USER}/test-dynamic-tokenizer" , use_fast=__UpperCamelCase , trust_remote_code=__UpperCamelCase ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" ) class _UpperCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self :str ): A = Trie() trie.add("Hello 友達" ) self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} ) trie.add("Hello" ) trie.data self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} ) def lowerCamelCase ( self :Dict ): A = Trie() self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS] This is a extra_id_100"] ) trie.add("[CLS]" ) trie.add("extra_id_1" ) trie.add("extra_id_100" ) self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS]", " This is a ", "extra_id_100"] ) def lowerCamelCase ( self :str ): A = Trie() trie.add("A" ) self.assertEqual(trie.split("ABC" ) , ["A", "BC"] ) self.assertEqual(trie.split("BCA" ) , ["BC", "A"] ) def lowerCamelCase ( self :Any ): A = Trie() trie.add("TOKEN]" ) trie.add("[SPECIAL_TOKEN]" ) self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] ) def lowerCamelCase ( self :List[str] ): A = Trie() trie.add("A" ) trie.add("P" ) trie.add("[SPECIAL_TOKEN]" ) self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] ) def lowerCamelCase ( self :Tuple ): A = Trie() trie.add("AB" ) 
trie.add("B" ) trie.add("C" ) self.assertEqual(trie.split("ABC" ) , ["AB", "C"] ) def lowerCamelCase ( self :str ): A = Trie() trie.add("ABC" ) trie.add("B" ) trie.add("CD" ) self.assertEqual(trie.split("ABCD" ) , ["ABC", "D"] ) def lowerCamelCase ( self :Dict ): # Even if the offsets are wrong, we necessarily output correct string # parts. A = Trie() A = trie.cut_text("ABC" , [0, 0, 2, 1, 2, 3] ) self.assertEqual(__UpperCamelCase , ["AB", "C"] )
292
"""Pascal's triangle: pretty-printer plus two generators (naive and optimized).

Reconstructed: the mangled original collapsed every local variable to ``A`` and
every parameter to ``UpperCamelCase`` while call sites still used the real
names (``generate_pascal_triangle``, ``populate_current_row``, ...), so the
module could not run.
"""


def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for ``num_rows`` rows, centred with spaces."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Leading spaces centre the row under the apex.
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Row values; no trailing space after the last element.
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Build Pascal's triangle row by row.

    >>> generate_pascal_triangle(3)
    [[1], [1, 1], [1, 2, 1]]

    Raises:
        TypeError: if ``num_rows`` is not an int.
        ValueError: if ``num_rows`` is negative.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """Return row ``current_row_idx`` computed from the rows already in ``triangle``."""
    current_row = [-1] * (current_row_idx + 1)
    # First and last elements of the current row are always 1.
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    """Fill one interior element: the sum of the two elements above it."""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Build Pascal's triangle using the left/right symmetry of each row.

    Only the first half of each row is summed; the second half is a mirrored
    copy of the first.

    >>> generate_pascal_triangle_optimized(3)
    [[1], [1, 1], [1, 2, 1]]
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    result = [[1]]
    for row_index in range(1, num_rows):
        # Pad the previous row with zeros so pairwise sums give the new row.
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Number of distinct elements in a row (ceil of half the length).
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Benchmark both generators for row counts 0..14."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
292
1
"""simple docstring""" from math import factorial def A__ ( UpperCamelCase = 20 ): A = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... A = n // 2 return int(factorial(UpperCamelCase ) / (factorial(UpperCamelCase ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(20)) else: try: _snake_case : Dict = int(sys.argv[1]) print(solution(n)) except ValueError: print('Invalid entry - please enter a number.')
292
"""simple docstring""" import math import sys def A__ ( UpperCamelCase ): A = "" try: with open(UpperCamelCase , "rb" ) as binary_file: A = binary_file.read() for dat in data: A = F"{dat:08b}" result += curr_byte return result except OSError: print("File not accessible" ) sys.exit() def A__ ( UpperCamelCase ): A = {"0": "0", "1": "1"} A, A = "", "" A = len(UpperCamelCase ) for i in range(len(UpperCamelCase ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue A = lexicon[curr_string] result += last_match_id A = last_match_id + "0" if math.loga(UpperCamelCase ).is_integer(): A = {} for curr_key in list(UpperCamelCase ): A = lexicon.pop(UpperCamelCase ) A = new_lex A = last_match_id + "1" index += 1 A = "" return result def A__ ( UpperCamelCase , UpperCamelCase ): A = 8 try: with open(UpperCamelCase , "wb" ) as opened_file: A = [ to_write[i : i + byte_length] for i in range(0 , len(UpperCamelCase ) , UpperCamelCase ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append("10000000" ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(UpperCamelCase , 2 ).to_bytes(1 , byteorder="big" ) ) except OSError: print("File not accessible" ) sys.exit() def A__ ( UpperCamelCase ): A = 0 for letter in data_bits: if letter == "1": break counter += 1 A = data_bits[counter:] A = data_bits[counter + 1 :] return data_bits def A__ ( UpperCamelCase , UpperCamelCase ): A = read_file_binary(UpperCamelCase ) A = remove_prefix(UpperCamelCase ) A = decompress_data(UpperCamelCase ) write_file_binary(UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
292
1
"""torch.distributed diagnostics script.

Checks that all GPUs in the cluster (one or many nodes) can talk to each
other via nccl and allocate GPU memory.

To run, first adjust the number of processes and nodes:

    python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py

You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if
using a custom addr:port. You can also use the rdzv API:
--rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d.
Use torch.distributed.launch instead of torch.distributed.run for torch < 1.9.

If you get a hang in `barrier` calls you have network issues; try debugging with:

    NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py

This script can also be run via `srun` under SLURM, e.g. 2 nodes x 4 gpus:

    #SBATCH --job-name=test-nodes        # name
    #SBATCH --nodes=2                    # nodes
    #SBATCH --ntasks-per-node=1          # crucial - only 1 task per dist per node!
    #SBATCH --cpus-per-task=10           # number of cores per tasks
    #SBATCH --gres=gpu:4                 # number of gpus
    #SBATCH --time 0:05:00               # maximum execution time (HH:MM:SS)
    #SBATCH --output=%x-%j.out           # output file name

    GPUS_PER_NODE=4
    MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
    MASTER_PORT=6000

    srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
      --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
      --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
      torch-distributed-gpu-test.py'

Reconstructed: the mangled original opened its ``*args`` tuple as a file in
``printflock`` and collapsed all module globals to ``_snake_case`` while
later statements read ``local_rank``/``device``/``rank`` etc.
"""
import fcntl
import os
import socket

import torch
import torch.distributed as dist


def printflock(*msgs):
    """Print under an exclusive flock on this file so output from concurrent
    processes does not interleave."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # Test distributed: init, all-reduce one tensor, synchronize.
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # Test cuda is available and can allocate memory.
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # Global rank / world size for the status line.
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")

    dist.barrier()
    if rank == 0:
        printflock(
            f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}"
        )

except Exception:
    printflock(f"{gpu} is broken")
    raise
292
"""Min-heap of named ``Node`` objects with O(1) value lookup by name and a
``decrease_key`` operation.

Reconstructed: the mangled original named both classes ``_UpperCAmelCase``
(the second shadowed the first) and every method ``lowerCamelCase``, while
bodies and the demo below call ``Node``, ``MinHeap``, ``self.sift_down``,
``my_min_heap.decrease_key`` etc.
"""


class Node:
    def __init__(self, name, val):
        # name: lookup key; val: priority used for ordering.
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        # Only __lt__ is defined; '>' comparisons work via Python's reflection.
        return self.val < other.val


class MinHeap:
    """Array-backed min-heap.

    ``idx_of_element`` maps each Node to its index in ``heap`` (needed by
    ``decrease_key``); ``heap_dict`` maps node names to values for
    ``heap[name]`` lookups.
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        """Heapify ``array`` in place (bottom-up) and register every node."""
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        # Sift down from the last internal node to the root.
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        """Move array[idx] down until both children are >= it."""
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        """Move heap[idx] up while it is smaller than its parent."""
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            (
                self.idx_of_element[self.heap[p]],
                self.idx_of_element[self.heap[idx]],
            ) = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        """Pop and return the minimum node."""
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        (
            self.idx_of_element[self.heap[0]],
            self.idx_of_element[self.heap[-1]],
        ) = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
292
1
"""simple docstring""" import os import pytest from transformers.dynamic_module_utils import get_imports _snake_case : List[str] = '\nimport os\n' _snake_case : str = '\ndef foo():\n import os\n return False\n' _snake_case : Tuple = '\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n' _snake_case : List[Any] = '\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n' _snake_case : Any = '\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n' _snake_case : List[Any] = '\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n' _snake_case : int = '\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n' _snake_case : List[Any] = '\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n' _snake_case : Tuple = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n' _snake_case : Optional[int] = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n' _snake_case : Tuple = [ TOP_LEVEL_IMPORT, IMPORT_IN_FUNCTION, DEEPLY_NESTED_IMPORT, TOP_LEVEL_TRY_IMPORT, GENERIC_EXCEPT_IMPORT, MULTILINE_TRY_IMPORT, MULTILINE_BOTH_IMPORT, MULTIPLE_EXCEPTS_IMPORT, EXCEPT_AS_IMPORT, TRY_IMPORT_IN_FUNCTION, ] @pytest.mark.parametrize("case" , UpperCamelCase ) def A__ ( UpperCamelCase , UpperCamelCase ): A = os.path.join(UpperCamelCase , "test_file.py" ) with open(UpperCamelCase , "w" ) as _tmp_file: _tmp_file.write(UpperCamelCase ) A = get_imports(UpperCamelCase ) assert parsed_imports == ["os"]
292
"""simple docstring""" from __future__ import annotations _snake_case : str = [] def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): for i in range(len(UpperCamelCase ) ): if board[row][i] == 1: return False for i in range(len(UpperCamelCase ) ): if board[i][column] == 1: return False for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , -1 , -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , len(UpperCamelCase ) ) ): if board[i][j] == 1: return False return True def A__ ( UpperCamelCase , UpperCamelCase ): if row >= len(UpperCamelCase ): solution.append(UpperCamelCase ) printboard(UpperCamelCase ) print() return True for i in range(len(UpperCamelCase ) ): if is_safe(UpperCamelCase , UpperCamelCase , UpperCamelCase ): A = 1 solve(UpperCamelCase , row + 1 ) A = 0 return False def A__ ( UpperCamelCase ): for i in range(len(UpperCamelCase ) ): for j in range(len(UpperCamelCase ) ): if board[i][j] == 1: print("Q" , end=" " ) else: print("." , end=" " ) print() # n=int(input("The no. of queens")) _snake_case : List[str] = 8 _snake_case : List[str] = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print('The total no. of solutions are :', len(solution))
292
1
"""simple docstring""" from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _UpperCAmelCase : def __init__( self :Union[str, Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :int=2 , __UpperCamelCase :str=3 , __UpperCamelCase :Any=4 , __UpperCamelCase :Optional[Any]=2 , __UpperCamelCase :Optional[Any]=7 , __UpperCamelCase :List[Any]=True , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :List[str]=True , __UpperCamelCase :Any=True , __UpperCamelCase :Optional[Any]=99 , __UpperCamelCase :Any=36 , __UpperCamelCase :List[str]=2 , __UpperCamelCase :Union[str, Any]=4 , __UpperCamelCase :Dict=37 , __UpperCamelCase :int="gelu" , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Optional[int]=0.1 , __UpperCamelCase :Tuple=5_12 , __UpperCamelCase :Optional[int]=16 , __UpperCamelCase :Optional[int]=2 , __UpperCamelCase :Optional[Any]=0.02 , __UpperCamelCase :int=6 , __UpperCamelCase :Any=6 , __UpperCamelCase :Optional[Any]=3 , __UpperCamelCase :Tuple=4 , 
__UpperCamelCase :str=None , __UpperCamelCase :List[str]=10_00 , ): A = parent A = batch_size A = num_channels A = image_size A = patch_size A = is_training A = use_input_mask A = use_token_type_ids A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = type_sequence_label_size A = initializer_range A = coordinate_size A = shape_size A = num_labels A = num_choices A = scope A = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) A = text_seq_length A = (image_size // patch_size) ** 2 + 1 A = self.text_seq_length + self.image_seq_length def lowerCamelCase ( self :Optional[Any] ): A = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) A = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) A = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: A = bbox[i, j, 3] A = bbox[i, j, 1] A = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: A = bbox[i, j, 2] A = bbox[i, j, 0] A = tmp_coordinate A = tf.constant(__UpperCamelCase ) A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A = None if self.use_input_mask: A = random_attention_mask([self.batch_size, self.text_seq_length] ) A = None if self.use_token_type_ids: A = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) A = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowerCamelCase ( self :List[str] , __UpperCamelCase :List[Any] , __UpperCamelCase :List[Any] , __UpperCamelCase :List[Any] , __UpperCamelCase :Any , __UpperCamelCase :int , __UpperCamelCase :List[Any] ): A = TFLayoutLMvaModel(config=__UpperCamelCase ) # text + image A = model(__UpperCamelCase , pixel_values=__UpperCamelCase , training=__UpperCamelCase ) A = model( __UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , training=__UpperCamelCase , ) A = model(__UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , training=__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only A = model(__UpperCamelCase , training=__UpperCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only A = model({"pixel_values": pixel_values} , training=__UpperCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowerCamelCase ( self :Dict , __UpperCamelCase :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[str] , __UpperCamelCase :int , __UpperCamelCase :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :Union[str, Any] ): A = self.num_labels A = 
TFLayoutLMvaForSequenceClassification(config=__UpperCamelCase ) A = model( __UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Any , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Optional[int] , __UpperCamelCase :int , __UpperCamelCase :Any , __UpperCamelCase :Dict , __UpperCamelCase :List[str] ): A = self.num_labels A = TFLayoutLMvaForTokenClassification(config=__UpperCamelCase ) A = model( __UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowerCamelCase ( self :Dict , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] , __UpperCamelCase :Dict , __UpperCamelCase :Optional[int] , __UpperCamelCase :List[Any] , __UpperCamelCase :Dict , __UpperCamelCase :Union[str, Any] ): A = 2 A = TFLayoutLMvaForQuestionAnswering(config=__UpperCamelCase ) A = model( __UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , training=__UpperCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase ( self :List[str] ): A = self.prepare_config_and_inputs() ((A), (A), (A), (A), (A), (A), (A), (A)) = config_and_inputs A = { "input_ids": input_ids, "bbox": bbox, "pixel_values": pixel_values, "token_type_ids": token_type_ids, 
"attention_mask": input_mask, } return config, inputs_dict @require_tf class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): UpperCamelCase = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) UpperCamelCase = ( {'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel} if is_tf_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def lowerCamelCase ( self :Dict , __UpperCamelCase :Optional[int] , __UpperCamelCase :Any , __UpperCamelCase :Any , __UpperCamelCase :int , __UpperCamelCase :Optional[Any] ): return True def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :str , __UpperCamelCase :Optional[int]=False ): A = copy.deepcopy(__UpperCamelCase ) if model_class in get_values(__UpperCamelCase ): A = { k: tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(__UpperCamelCase , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__UpperCamelCase ): A = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__UpperCamelCase ): A = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) A = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__UpperCamelCase ): A = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__UpperCamelCase ): A = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def lowerCamelCase ( self :List[Any] ): A = TFLayoutLMvaModelTester(self ) A = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 ) def lowerCamelCase ( self :Tuple ): self.config_tester.run_common_tests() 
def lowerCamelCase ( self :Dict ): A, A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(__UpperCamelCase ) if getattr(__UpperCamelCase , "hf_compute_loss" , __UpperCamelCase ): # The number of elements in the loss should be the same as the number of elements in the label A = self._prepare_for_class(inputs_dict.copy() , __UpperCamelCase , return_labels=__UpperCamelCase ) A = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__UpperCamelCase )[0] ] A = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs A = self._prepare_for_class(inputs_dict.copy() , __UpperCamelCase , return_labels=__UpperCamelCase ) A = prepared_for_class.pop("input_ids" ) A = model(__UpperCamelCase , **__UpperCamelCase )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions A = self._prepare_for_class(inputs_dict.copy() , __UpperCamelCase , return_labels=__UpperCamelCase ) A = prepared_for_class.pop("input_ids" ) if "labels" in prepared_for_class: A = prepared_for_class["labels"].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: A = -1_00 A = tf.convert_to_tensor(__UpperCamelCase ) A = model(__UpperCamelCase , **__UpperCamelCase )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict A = self._prepare_for_class(inputs_dict.copy() , __UpperCamelCase , return_labels=__UpperCamelCase ) A = model(__UpperCamelCase )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple A = self._prepare_for_class(inputs_dict.copy() , __UpperCamelCase , return_labels=__UpperCamelCase ) # Get keys that were added 
with the _prepare_for_class function A = prepared_for_class.keys() - inputs_dict.keys() A = inspect.signature(model.call ).parameters A = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple A = {0: "input_ids"} for label_key in label_keys: A = signature_names.index(__UpperCamelCase ) A = label_key A = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple A = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: A = prepared_for_class[value] A = tuple(__UpperCamelCase ) # Send to model A = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def lowerCamelCase ( self :int ): ( ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def lowerCamelCase ( self :int ): ( ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ) = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A = type self.model_tester.create_and_check_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def lowerCamelCase ( self :str ): ( ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def lowerCamelCase ( self :str ): ( ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ) = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def lowerCamelCase ( self :Optional[int] ): ( ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) @slow def lowerCamelCase ( self :Optional[int] ): for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = TFLayoutLMvaModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def A__ ( ): A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf class _UpperCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase ( self :Union[str, Any] ): return LayoutLMvaImageProcessor(apply_ocr=__UpperCamelCase ) if is_vision_available() else None @slow def lowerCamelCase ( self :str ): A = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ) A = self.default_image_processor A = prepare_img() A = image_processor(images=__UpperCamelCase , return_tensors="tf" ).pixel_values A = tf.constant([[1, 2]] ) A = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass A = model(input_ids=__UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , training=__UpperCamelCase ) # verify the logits A = (1, 1_99, 7_68) self.assertEqual(outputs.last_hidden_state.shape , __UpperCamelCase ) A = tf.constant( [[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __UpperCamelCase , atol=1e-4 ) )
292
"""simple docstring""" import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class _UpperCAmelCase : @staticmethod def lowerCamelCase ( *__UpperCamelCase :List[Any] , **__UpperCamelCase :List[Any] ): pass def A__ ( UpperCamelCase ): A = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class _UpperCAmelCase ( unittest.TestCase ): UpperCamelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] ): A = DepthEstimationPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase ( self :Dict , __UpperCamelCase :Optional[int] , __UpperCamelCase :Optional[Any] ): A = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" ) self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , __UpperCamelCase ) import datasets A = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" ) A = depth_estimator( [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] ) self.assertEqual( [ {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), 
"depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, ] , __UpperCamelCase , ) @require_tf @unittest.skip("Depth estimation is not implemented in TF" ) def lowerCamelCase ( self :Optional[Any] ): pass @slow @require_torch def lowerCamelCase ( self :Optional[Any] ): A = "Intel/dpt-large" A = pipeline("depth-estimation" , model=__UpperCamelCase ) A = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" ) A = hashimage(outputs["depth"] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 ) @require_torch def lowerCamelCase ( self :Optional[Any] ): # This is highly irregular to have no small tests. self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
292
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) _snake_case : Any = { 'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'], 'tokenization_perceiver': ['PerceiverTokenizer'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : Union[str, Any] = ['PerceiverFeatureExtractor'] _snake_case : str = ['PerceiverImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : int = [ 'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST', 'PerceiverForImageClassificationConvProcessing', 'PerceiverForImageClassificationFourier', 'PerceiverForImageClassificationLearned', 'PerceiverForMaskedLM', 'PerceiverForMultimodalAutoencoding', 'PerceiverForOpticalFlow', 'PerceiverForSequenceClassification', 'PerceiverLayer', 'PerceiverModel', 'PerceiverPreTrainedModel', ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, 
PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys _snake_case : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
292
"""simple docstring""" from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _UpperCAmelCase : UpperCamelCase = PegasusConfig UpperCamelCase = {} UpperCamelCase = '''gelu''' def __init__( self :Union[str, Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :str=13 , __UpperCamelCase :List[Any]=7 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :List[Any]=False , __UpperCamelCase :Any=99 , __UpperCamelCase :Tuple=32 , __UpperCamelCase :Optional[int]=2 , __UpperCamelCase :Optional[Any]=4 , __UpperCamelCase :Tuple=37 , __UpperCamelCase :Optional[Any]=0.1 , __UpperCamelCase :Tuple=0.1 , __UpperCamelCase :Optional[int]=40 , __UpperCamelCase :Tuple=2 , __UpperCamelCase :Dict=1 , __UpperCamelCase :Any=0 , ): A = parent A = batch_size A = seq_length A = is_training A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = eos_token_id A = pad_token_id A = bos_token_id def lowerCamelCase ( self :Tuple ): A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A = tf.concat([input_ids, eos_tensor] , axis=1 ) A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , 
encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) A = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) return config, inputs_dict def lowerCamelCase ( self :str , __UpperCamelCase :str , __UpperCamelCase :Union[str, Any] ): A = TFPegasusModel(config=__UpperCamelCase ).get_decoder() A = inputs_dict["input_ids"] A = input_ids[:1, :] A = inputs_dict["attention_mask"][:1, :] A = inputs_dict["head_mask"] A = 1 # first forward pass A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , head_mask=__UpperCamelCase , use_cache=__UpperCamelCase ) A, A = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A = ids_tensor((self.batch_size, 3) , config.vocab_size ) A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A = tf.concat([input_ids, next_tokens] , axis=-1 ) A = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0] A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A = output_from_no_past[:, -3:, random_slice_idx] A = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__UpperCamelCase , __UpperCamelCase , rtol=1e-3 ) def A__ ( 
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , ): if attention_mask is None: A = tf.cast(tf.math.not_equal(UpperCamelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: A = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: A = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): UpperCamelCase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () UpperCamelCase = (TFPegasusForConditionalGeneration,) if is_tf_available() else () UpperCamelCase = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) UpperCamelCase = True UpperCamelCase = False UpperCamelCase = False def lowerCamelCase ( self :int ): A = TFPegasusModelTester(self ) A = ConfigTester(self , config_class=__UpperCamelCase ) def lowerCamelCase ( self :Dict ): self.config_tester.run_common_tests() def lowerCamelCase ( self :Any ): A = self.model_tester.prepare_config_and_inputs_for_common() 
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCamelCase ) @require_sentencepiece @require_tokenizers @require_tf class _UpperCAmelCase ( unittest.TestCase ): UpperCamelCase = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for 
their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] UpperCamelCase = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers UpperCamelCase = '''google/pegasus-xsum''' @cached_property def lowerCamelCase ( self :Any ): return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCamelCase ( self :Dict ): A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCamelCase ( self :str , **__UpperCamelCase :str ): A = self.translate_src_text(**__UpperCamelCase ) assert self.expected_text == generated_words def lowerCamelCase ( self :Any , **__UpperCamelCase :List[str] ): A = self.tokenizer(self.src_text , **__UpperCamelCase , padding=__UpperCamelCase , return_tensors="tf" ) A = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCamelCase , ) A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCamelCase ) return generated_words @slow def lowerCamelCase ( self :Union[str, Any] ): self._assert_generated_batch_equal_expected()
292
1
"""simple docstring""" import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def A__ ( UpperCamelCase , UpperCamelCase ): A = args.log_outputs A = "_".join(args.dataset.split("/" ) + [args.config, args.split] ) # load metric A = load_metric("wer" ) A = load_metric("cer" ) # compute metrics A = wer.compute(references=result["target"] , predictions=result["prediction"] ) A = cer.compute(references=result["target"] , predictions=result["prediction"] ) # print & log results A = F"WER: {wer_result}\nCER: {cer_result}" print(UpperCamelCase ) with open(F"{dataset_id}_eval_results.txt" , "w" ) as f: f.write(UpperCamelCase ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: A = F"log_{dataset_id}_predictions.txt" A = F"log_{dataset_id}_targets.txt" with open(UpperCamelCase , "w" ) as p, open(UpperCamelCase , "w" ) as t: # mapping function to write output def write_to_file(UpperCamelCase , UpperCamelCase ): p.write(F"{i}" + "\n" ) p.write(batch["prediction"] + "\n" ) t.write(F"{i}" + "\n" ) t.write(batch["target"] + "\n" ) result.map(UpperCamelCase , with_indices=UpperCamelCase ) def A__ ( UpperCamelCase ): A = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training A = re.sub(UpperCamelCase , "" , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
A = ["\n\n", "\n", " ", " "] for t in token_sequences_to_ignore: A = " ".join(text.split(UpperCamelCase ) ) return text def A__ ( UpperCamelCase ): # load dataset A = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=UpperCamelCase ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor A = AutoFeatureExtractor.from_pretrained(args.model_id ) A = feature_extractor.sampling_rate # resample audio A = dataset.cast_column("audio" , Audio(sampling_rate=UpperCamelCase ) ) # load eval pipeline if args.device is None: A = 0 if torch.cuda.is_available() else -1 A = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(UpperCamelCase ): A = asr( batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) A = prediction["text"] A = normalize_text(batch["sentence"] ) return batch # run inference on all examples A = dataset.map(UpperCamelCase , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": _snake_case : Optional[int] = argparse.ArgumentParser() parser.add_argument( '--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers' ) parser.add_argument( '--dataset', type=str, required=True, help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets', ) parser.add_argument( '--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice' ) parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`') parser.add_argument( '--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.' ) parser.add_argument( '--stride_length_s', type=float, default=None, help='Stride of the audio chunks. 
Defaults to 1 second.' ) parser.add_argument( '--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.' ) parser.add_argument( '--device', type=int, default=None, help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.', ) _snake_case : Tuple = parser.parse_args() main(args)
292
"""simple docstring""" from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def A__ ( UpperCamelCase = "laptop" ): A = F"https://www.amazon.in/laptop/s?k={product}" A = { "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36", "Accept-Language": "en-US, en;q=0.5", } A = BeautifulSoup(requests.get(UpperCamelCase , headers=UpperCamelCase ).text ) # Initialize a Pandas dataframe with the column titles A = DataFrame( columns=[ "Product Title", "Product Link", "Current Price of the product", "Product Rating", "MRP of the product", "Discount", ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( "div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ): try: A = item.ha.text A = "https://www.amazon.in/" + item.ha.a["href"] A = item.find("span" , attrs={"class": "a-offscreen"} ).text try: A = item.find("span" , attrs={"class": "a-icon-alt"} ).text except AttributeError: A = "Not available" try: A = ( "₹" + item.find( "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1] ) except AttributeError: A = "" try: A = float( ( ( float(product_mrp.strip("₹" ).replace("," , "" ) ) - float(product_price.strip("₹" ).replace("," , "" ) ) ) / float(product_mrp.strip("₹" ).replace("," , "" ) ) ) * 100 ) except ValueError: A = float("nan" ) except AttributeError: pass A = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] A = " " A = " " data_frame.index += 1 return data_frame if __name__ == "__main__": _snake_case : Optional[int] = 'headphones' get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
292
1
"""simple docstring""" import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) _snake_case : str = pytest.mark.integration @pytest.mark.parametrize("path" , ["paws", "csv"] ) def A__ ( UpperCamelCase , UpperCamelCase ): inspect_dataset(UpperCamelCase , UpperCamelCase ) A = path + ".py" assert script_name in os.listdir(UpperCamelCase ) assert "__pycache__" not in os.listdir(UpperCamelCase ) @pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" ) @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" ) @pytest.mark.parametrize("path" , ["accuracy"] ) def A__ ( UpperCamelCase , UpperCamelCase ): inspect_metric(UpperCamelCase , UpperCamelCase ) A = path + ".py" assert script_name in os.listdir(UpperCamelCase ) assert "__pycache__" not in os.listdir(UpperCamelCase ) @pytest.mark.parametrize( "path, config_name, expected_splits" , [ ("squad", "plain_text", ["train", "validation"]), ("dalle-mini/wit", "dalle-mini--wit", ["train"]), ("paws", "labeled_final", ["train", "test", "validation"]), ] , ) def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): A = get_dataset_config_info(UpperCamelCase , config_name=UpperCamelCase ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( "path, config_name, expected_exception" , [ ("paws", None, ValueError), ] , ) def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): with pytest.raises(UpperCamelCase ): get_dataset_config_info(UpperCamelCase , config_name=UpperCamelCase ) @pytest.mark.parametrize( "path, expected" , [ ("squad", "plain_text"), ("acronym_identification", "default"), ("lhoestq/squad", "plain_text"), ("lhoestq/test", "default"), ("lhoestq/demo1", "lhoestq--demo1"), ("dalle-mini/wit", "dalle-mini--wit"), ] , ) def A__ ( UpperCamelCase , UpperCamelCase ): A = 
get_dataset_config_names(UpperCamelCase ) assert expected in config_names @pytest.mark.parametrize( "path, expected_configs, expected_splits_in_first_config" , [ ("squad", ["plain_text"], ["train", "validation"]), ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]), ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]), ] , ) def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): A = get_dataset_infos(UpperCamelCase ) assert list(infos.keys() ) == expected_configs A = expected_configs[0] assert expected_config in infos A = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( "path, expected_config, expected_splits" , [ ("squad", "plain_text", ["train", "validation"]), ("dalle-mini/wit", "dalle-mini--wit", ["train"]), ("paws", "labeled_final", ["train", "test", "validation"]), ] , ) def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): A = get_dataset_infos(UpperCamelCase ) assert expected_config in infos A = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( "path, config_name, expected_exception" , [ ("paws", None, ValueError), ] , ) def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): with pytest.raises(UpperCamelCase ): get_dataset_split_names(UpperCamelCase , config_name=UpperCamelCase )
292
"""simple docstring""" import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging _snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name class _UpperCAmelCase ( lowercase_ ): def __init__( self :Dict , __UpperCamelCase :WhisperForConditionalGeneration , __UpperCamelCase :WhisperProcessor , __UpperCamelCase :AutoencoderKL , __UpperCamelCase :CLIPTextModel , __UpperCamelCase :CLIPTokenizer , __UpperCamelCase :UNetaDConditionModel , __UpperCamelCase :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __UpperCamelCase :StableDiffusionSafetyChecker , __UpperCamelCase :CLIPImageProcessor , ): super().__init__() if safety_checker is None: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
) self.register_modules( speech_model=__UpperCamelCase , speech_processor=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , feature_extractor=__UpperCamelCase , ) def lowerCamelCase ( self :Any , __UpperCamelCase :Optional[Union[str, int]] = "auto" ): if slice_size == "auto": A = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__UpperCamelCase ) def lowerCamelCase ( self :Tuple ): self.enable_attention_slicing(__UpperCamelCase ) @torch.no_grad() def __call__( self :Optional[Any] , __UpperCamelCase :Any , __UpperCamelCase :Dict=1_60_00 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 50 , __UpperCamelCase :float = 7.5 , __UpperCamelCase :Optional[Union[str, List[str]]] = None , __UpperCamelCase :Optional[int] = 1 , __UpperCamelCase :float = 0.0 , __UpperCamelCase :Optional[torch.Generator] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , __UpperCamelCase :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase :int = 1 , **__UpperCamelCase :Dict , ): A = self.speech_processor.feature_extractor( __UpperCamelCase , return_tensors="pt" , sampling_rate=__UpperCamelCase ).input_features.to(self.device ) A = self.speech_model.generate(__UpperCamelCase , max_length=48_00_00 ) A = self.speech_processor.tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase , normalize=__UpperCamelCase )[ 0 ] if isinstance(__UpperCamelCase , __UpperCamelCase ): A = 1 elif isinstance(__UpperCamelCase , __UpperCamelCase ): A = len(__UpperCamelCase ) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." 
) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(__UpperCamelCase )}." ) # get prompt text embeddings A = self.tokenizer( __UpperCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , ) A = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) A = text_input_ids[:, : self.tokenizer.model_max_length] A = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method A, A, A = text_embeddings.shape A = text_embeddings.repeat(1 , __UpperCamelCase , 1 ) A = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. A = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: A = 42 if negative_prompt is None: A = [""] * batch_size elif type(__UpperCamelCase ) is not type(__UpperCamelCase ): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !=" f" {type(__UpperCamelCase )}." 
) elif isinstance(__UpperCamelCase , __UpperCamelCase ): A = [negative_prompt] elif batch_size != len(__UpperCamelCase ): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: A = negative_prompt A = text_input_ids.shape[-1] A = self.tokenizer( __UpperCamelCase , padding="max_length" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="pt" , ) A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method A = uncond_embeddings.shape[1] A = uncond_embeddings.repeat(1 , __UpperCamelCase , 1 ) A = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes A = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
A = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) A = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="cpu" , dtype=__UpperCamelCase ).to( self.device ) else: A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase ) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" ) A = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(__UpperCamelCase ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand A = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler A = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] A = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) A = {} if accepts_eta: A = eta for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents A = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase ) # predict the noise residual A = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample # perform guidance if do_classifier_free_guidance: A, A = noise_pred.chunk(2 ) A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 A = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) A = 1 / 0.18_215 * latents A = self.vae.decode(__UpperCamelCase ).sample A = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": A = self.numpy_to_pil(__UpperCamelCase ) if not return_dict: return image return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
292
1
"""simple docstring""" import math import sys def A__ ( UpperCamelCase ): A = "" try: with open(UpperCamelCase , "rb" ) as binary_file: A = binary_file.read() for dat in data: A = F"{dat:08b}" result += curr_byte return result except OSError: print("File not accessible" ) sys.exit() def A__ ( UpperCamelCase ): A = {"0": "0", "1": "1"} A, A = "", "" A = len(UpperCamelCase ) for i in range(len(UpperCamelCase ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue A = lexicon[curr_string] result += last_match_id A = last_match_id + "0" if math.loga(UpperCamelCase ).is_integer(): A = {} for curr_key in list(UpperCamelCase ): A = lexicon.pop(UpperCamelCase ) A = new_lex A = last_match_id + "1" index += 1 A = "" return result def A__ ( UpperCamelCase , UpperCamelCase ): A = 8 try: with open(UpperCamelCase , "wb" ) as opened_file: A = [ to_write[i : i + byte_length] for i in range(0 , len(UpperCamelCase ) , UpperCamelCase ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append("10000000" ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(UpperCamelCase , 2 ).to_bytes(1 , byteorder="big" ) ) except OSError: print("File not accessible" ) sys.exit() def A__ ( UpperCamelCase ): A = 0 for letter in data_bits: if letter == "1": break counter += 1 A = data_bits[counter:] A = data_bits[counter + 1 :] return data_bits def A__ ( UpperCamelCase , UpperCamelCase ): A = read_file_binary(UpperCamelCase ) A = remove_prefix(UpperCamelCase ) A = decompress_data(UpperCamelCase ) write_file_binary(UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
292
"""simple docstring""" _snake_case : Optional[int] = [ 'DownloadConfig', 'DownloadManager', 'DownloadMode', 'StreamingDownloadManager', ] from .download_config import DownloadConfig from .download_manager import DownloadManager, DownloadMode from .streaming_download_manager import StreamingDownloadManager
292
1
"""simple docstring""" from __future__ import annotations import requests _snake_case : Dict = set( 'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split() ) def A__ ( UpperCamelCase , UpperCamelCase = 1 , UpperCamelCase = "new" , UpperCamelCase = None ): A = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(UpperCamelCase ) - valid_terms ) ): A = F"Invalid search term: {invalid_search_terms}" raise ValueError(UpperCamelCase ) A = requests.get( F"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}" , headers={"User-agent": "A random string"} , ) if response.status_code == 429: raise requests.HTTPError A = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(UpperCamelCase )} A = {} for id_ in range(UpperCamelCase ): A = { item: data["data"]["children"][id_]["data"][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
292
"""simple docstring""" import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def A__ ( UpperCamelCase ): A = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(UpperCamelCase , UpperCamelCase ) def A__ ( UpperCamelCase ): A = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: A = s_dict.pop(UpperCamelCase ) elif "subsample" in key: A = s_dict.pop(UpperCamelCase ) def A__ ( UpperCamelCase ): A, A = emb.weight.shape A = nn.Linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase ) A = emb.weight.data return lin_layer def A__ ( UpperCamelCase , UpperCamelCase ): A = torch.load(UpperCamelCase , map_location="cpu" ) A = mam_aaa["args"] A = mam_aaa["model"] A = state_dict["decoder.output_projection.weight"] remove_ignore_keys_(UpperCamelCase ) rename_keys(UpperCamelCase ) A = state_dict["decoder.embed_tokens.weight"].shape[0] A = args.share_decoder_input_output_embed A = [int(UpperCamelCase ) for i in args.conv_kernel_sizes.split("," )] A = SpeechaTextConfig( vocab_size=UpperCamelCase , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(UpperCamelCase ) , conv_channels=args.conv_channels , conv_kernel_sizes=UpperCamelCase , 
input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=UpperCamelCase , num_beams=5 , max_length=200 , use_cache=UpperCamelCase , decoder_start_token_id=2 , early_stopping=UpperCamelCase , ) A = SpeechaTextForConditionalGeneration(UpperCamelCase ) A, A = model.model.load_state_dict(UpperCamelCase , strict=UpperCamelCase ) if len(UpperCamelCase ) > 0 and not set(UpperCamelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing," F" but all the following weights are missing {missing}" ) if tie_embeds: A = make_linear_from_emb(model.model.decoder.embed_tokens ) else: A = lm_head_weights model.save_pretrained(UpperCamelCase ) if __name__ == "__main__": _snake_case : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') _snake_case : str = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
292
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case : Optional[Any] = logging.get_logger(__name__) _snake_case : List[str] = { 'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json', # See all XGLM models at https://huggingface.co/models?filter=xglm } class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = '''xglm''' UpperCamelCase = ['''past_key_values'''] UpperCamelCase = { '''num_attention_heads''': '''attention_heads''', '''hidden_size''': '''d_model''', '''num_hidden_layers''': '''num_layers''', } def __init__( self :Optional[int] , __UpperCamelCase :Union[str, Any]=25_60_08 , __UpperCamelCase :Optional[Any]=20_48 , __UpperCamelCase :Optional[int]=10_24 , __UpperCamelCase :Dict=40_96 , __UpperCamelCase :List[Any]=24 , __UpperCamelCase :str=16 , __UpperCamelCase :str="gelu" , __UpperCamelCase :Any=0.1 , __UpperCamelCase :Dict=0.1 , __UpperCamelCase :List[Any]=0.0 , __UpperCamelCase :Optional[Any]=0.0 , __UpperCamelCase :int=0.02 , __UpperCamelCase :List[str]=True , __UpperCamelCase :Tuple=True , __UpperCamelCase :int=2 , __UpperCamelCase :List[Any]=1 , __UpperCamelCase :Tuple=0 , __UpperCamelCase :int=2 , **__UpperCamelCase :List[Any] , ): A = vocab_size A = max_position_embeddings A = d_model A = ffn_dim A = num_layers A = attention_heads A = activation_function A = dropout A = attention_dropout A = activation_dropout A = layerdrop A = init_std A = scale_embedding # scale factor will be sqrt(d_model) if True A = use_cache super().__init__( pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , **__UpperCamelCase , )
292
"""simple docstring""" from math import isqrt, loga def A__ ( UpperCamelCase ): A = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , UpperCamelCase , UpperCamelCase ): A = False return [i for i in range(2 , UpperCamelCase ) if is_prime[i]] def A__ ( UpperCamelCase = 800_800 , UpperCamelCase = 800_800 ): A = degree * loga(UpperCamelCase ) A = int(UpperCamelCase ) A = calculate_prime_numbers(UpperCamelCase ) A = 0 A = 0 A = len(UpperCamelCase ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(F"""{solution() = }""")
292
1
"""simple docstring""" import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params _snake_case : Any = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ['memory_attention', 'encoder_attn'], ['attention', 'attn'], ['/', '.'], ['.LayerNorm.gamma', '_layer_norm.weight'], ['.LayerNorm.beta', '_layer_norm.bias'], ['r.layer_', 'r.layers.'], ['output_proj', 'out_proj'], ['ffn.dense_1.', 'fc2.'], ['ffn.dense.', 'fc1.'], ['ffn_layer_norm', 'final_layer_norm'], ['kernel', 'weight'], ['encoder_layer_norm.', 'encoder.layer_norm.'], ['decoder_layer_norm.', 'decoder.layer_norm.'], ['embeddings.weights', 'shared.weight'], ] def A__ ( UpperCamelCase ): for pegasus_name, hf_name in PATTERNS: A = k.replace(UpperCamelCase , UpperCamelCase ) return k def A__ ( UpperCamelCase , UpperCamelCase ): A = DEFAULTS.copy() cfg_kwargs.update(UpperCamelCase ) A = PegasusConfig(**UpperCamelCase ) A = PegasusForConditionalGeneration(UpperCamelCase ) A = torch_model.model.state_dict() A = {} for k, v in tf_weights.items(): A = rename_state_dict_key(UpperCamelCase ) if new_k not in sd: raise ValueError(F"could not find new key {new_k} in state dict. 
(converted from {k})" ) if "dense" in k or "proj" in new_k: A = v.T A = torch.tensor(UpperCamelCase , dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, F"{new_k}, {k}, {v.shape}, {sd[new_k].shape}" # make sure embedding.padding_idx is respected A = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1] ) A = mapping["shared.weight"] A = mapping["shared.weight"] A = {k: torch.zeros_like(UpperCamelCase ) for k, v in sd.items() if k.endswith("bias" ) and k not in mapping} mapping.update(**UpperCamelCase ) A, A = torch_model.model.load_state_dict(UpperCamelCase , strict=UpperCamelCase ) A = [ k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"] ] assert unexpected_missing == [], F"no matches found for the following torch keys {unexpected_missing}" assert extra == [], F"no matches found for the following tf keys {extra}" return torch_model def A__ ( UpperCamelCase="./ckpt/aeslc/model.ckpt-32000" ): A = tf.train.list_variables(UpperCamelCase ) A = {} A = ["Adafactor", "global_step"] for name, shape in tqdm(UpperCamelCase , desc="converting tf checkpoint to dict" ): A = any(pat in name for pat in ignore_name ) if skip_key: continue A = tf.train.load_variable(UpperCamelCase , UpperCamelCase ) A = array return tf_weights def A__ ( UpperCamelCase , UpperCamelCase ): # save tokenizer first A = Path(UpperCamelCase ).parent.name A = task_specific_params[F"summarization_{dataset}"]["max_position_embeddings"] A = PegasusTokenizer.from_pretrained("sshleifer/pegasus" , model_max_length=UpperCamelCase ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(UpperCamelCase ) # convert model A = get_tf_weights_as_numpy(UpperCamelCase ) A = task_specific_params[F"summarization_{dataset}"] if dataset == "large": A = task_specific_params A = convert_pegasus(UpperCamelCase , UpperCamelCase ) torch_model.save_pretrained(UpperCamelCase ) A = torch_model.state_dict() 
sd.pop("model.decoder.embed_positions.weight" ) sd.pop("model.encoder.embed_positions.weight" ) torch.save(UpperCamelCase , Path(UpperCamelCase ) / "pytorch_model.bin" ) if __name__ == "__main__": _snake_case : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.') _snake_case : int = parser.parse_args() if args.save_dir is None: _snake_case : Tuple = Path(args.tf_ckpt_path).parent.name _snake_case : List[str] = os.path.join('pegasus', dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
292
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _snake_case : Union[str, Any] = { 'configuration_encodec': [ 'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EncodecConfig', ], 'feature_extraction_encodec': ['EncodecFeatureExtractor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : int = [ 'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST', 'EncodecModel', 'EncodecPreTrainedModel', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys _snake_case : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
292
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_download, hf_hub_url from PIL import Image from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() _snake_case : List[str] = logging.get_logger(__name__) def A__ ( UpperCamelCase ): A = SwinConfig( embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["stage2", "stage3", "stage4"] , ) A = DetaConfig( backbone_config=UpperCamelCase , num_queries=900 , encoder_ffn_dim=2_048 , decoder_ffn_dim=2_048 , num_feature_levels=5 , assign_first_stage=UpperCamelCase , with_box_refine=UpperCamelCase , two_stage=UpperCamelCase , ) # set labels A = "huggingface/label-files" if "o365" in model_name: A = 366 A = "object365-id2label.json" else: A = 91 A = "coco-detection-id2label.json" A = num_labels A = json.load(open(cached_download(hf_hub_url(UpperCamelCase , UpperCamelCase , repo_type="dataset" ) ) , "r" ) ) A = {int(UpperCamelCase ): v for k, v in idalabel.items()} A = idalabel A = {v: k for k, v in idalabel.items()} return config def A__ ( UpperCamelCase ): A = [] # stem # fmt: off rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") ) rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") ) rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", 
F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") ) rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") ) rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") ) rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") ) rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") ) rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") ) rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") ) rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") ) rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") ) rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") ) rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") ) rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") ) if i < 3: rename_keys.append((F"backbone.0.body.layers.{i}.downsample.reduction.weight", 
F"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") ) rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.weight", F"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") ) rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.bias", F"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") ) rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") ) rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") ) rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") ) rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") ) rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") ) rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") ) # transformer encoder for i in range(config.encoder_layers ): rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", F"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") ) rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", F"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") ) rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", F"model.encoder.layers.{i}.self_attn.attention_weights.weight") ) rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", F"model.encoder.layers.{i}.self_attn.attention_weights.bias") ) rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.weight", F"model.encoder.layers.{i}.self_attn.value_proj.weight") ) rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.bias", F"model.encoder.layers.{i}.self_attn.value_proj.bias") ) 
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.weight", F"model.encoder.layers.{i}.self_attn.output_proj.weight") ) rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.bias", F"model.encoder.layers.{i}.self_attn.output_proj.bias") ) rename_keys.append((F"transformer.encoder.layers.{i}.norm1.weight", F"model.encoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"model.encoder.layers.{i}.self_attn_layer_norm.bias") ) rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"model.encoder.layers.{i}.fc1.weight") ) rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"model.encoder.layers.{i}.fc1.bias") ) rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"model.encoder.layers.{i}.fc2.weight") ) rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"model.encoder.layers.{i}.fc2.bias") ) rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"model.encoder.layers.{i}.final_layer_norm.weight") ) rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"model.encoder.layers.{i}.final_layer_norm.bias") ) # transformer decoder for i in range(config.decoder_layers ): rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") ) rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") ) rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", F"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") ) rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", F"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") ) rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", 
F"model.decoder.layers.{i}.encoder_attn.value_proj.weight") ) rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", F"model.decoder.layers.{i}.encoder_attn.value_proj.bias") ) rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", F"model.decoder.layers.{i}.encoder_attn.output_proj.weight") ) rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", F"model.decoder.layers.{i}.encoder_attn.output_proj.bias") ) rename_keys.append((F"transformer.decoder.layers.{i}.norm1.weight", F"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") ) rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") ) rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"model.decoder.layers.{i}.self_attn.out_proj.weight") ) rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"model.decoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append((F"transformer.decoder.layers.{i}.norm2.weight", F"model.decoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append((F"transformer.decoder.layers.{i}.norm2.bias", F"model.decoder.layers.{i}.self_attn_layer_norm.bias") ) rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"model.decoder.layers.{i}.fc1.weight") ) rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"model.decoder.layers.{i}.fc1.bias") ) rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"model.decoder.layers.{i}.fc2.weight") ) rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"model.decoder.layers.{i}.fc2.bias") ) rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"model.decoder.layers.{i}.final_layer_norm.weight") ) rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"model.decoder.layers.{i}.final_layer_norm.bias") ) # fmt: on return rename_keys def A__ ( 
UpperCamelCase , UpperCamelCase , UpperCamelCase ): A = dct.pop(UpperCamelCase ) A = val def A__ ( UpperCamelCase , UpperCamelCase ): A = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): A = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) A = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight" ) A = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict A = in_proj_weight[:dim, :] A = in_proj_bias[: dim] A = in_proj_weight[ dim : dim * 2, : ] A = in_proj_bias[ dim : dim * 2 ] A = in_proj_weight[ -dim :, : ] A = in_proj_bias[-dim :] # fmt: on def A__ ( UpperCamelCase , UpperCamelCase ): # transformer decoder self-attention layers A = config.d_model for i in range(config.decoder_layers ): # read in weights + bias of input projection layer of self-attention A = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_weight" ) A = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict A = in_proj_weight[:hidden_size, :] A = in_proj_bias[:hidden_size] A = in_proj_weight[ hidden_size : hidden_size * 2, : ] A = in_proj_bias[hidden_size : hidden_size * 2] A = in_proj_weight[-hidden_size:, :] A = in_proj_bias[-hidden_size:] def A__ ( ): A = "http://images.cocodataset.org/val2017/000000039769.jpg" A = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ) return im @torch.no_grad() def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): A = get_deta_config(UpperCamelCase ) # load original state dict if model_name == "deta-swin-large": A = hf_hub_download(repo_id="nielsr/deta-checkpoints" , filename="adet_swin_ft.pth" ) elif model_name == 
"deta-swin-large-o365": A = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365" , filename="deta_swin_pt_o365.pth" ) else: raise ValueError(F"Model name {model_name} not supported" ) A = torch.load(UpperCamelCase , map_location="cpu" )["model"] # original state dict for name, param in state_dict.items(): print(UpperCamelCase , param.shape ) # rename keys A = create_rename_keys(UpperCamelCase ) for src, dest in rename_keys: rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase ) read_in_swin_q_k_v(UpperCamelCase , config.backbone_config ) read_in_decoder_q_k_v(UpperCamelCase , UpperCamelCase ) # fix some prefixes for key in state_dict.copy().keys(): if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key: A = state_dict.pop(UpperCamelCase ) A = val if "input_proj" in key: A = state_dict.pop(UpperCamelCase ) A = val if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key: A = state_dict.pop(UpperCamelCase ) A = val # finally, create HuggingFace model and load state dict A = DetaForObjectDetection(UpperCamelCase ) model.load_state_dict(UpperCamelCase ) model.eval() A = "cuda" if torch.cuda.is_available() else "cpu" model.to(UpperCamelCase ) # load image processor A = DetaImageProcessor(format="coco_detection" ) # verify our conversion on image A = prepare_img() A = processor(images=UpperCamelCase , return_tensors="pt" ) A = encoding["pixel_values"] A = model(pixel_values.to(UpperCamelCase ) ) # verify logits print("Logits:" , outputs.logits[0, :3, :3] ) print("Boxes:" , outputs.pred_boxes[0, :3, :3] ) if model_name == "deta-swin-large": A = torch.tensor( [[-7.63_08, -2.84_85, -5.37_37], [-7.20_37, -4.55_05, -4.80_27], [-7.29_43, -4.26_11, -4.66_17]] ) A = torch.tensor([[0.49_87, 0.49_69, 0.99_99], [0.25_49, 0.54_98, 0.48_05], [0.54_98, 0.27_57, 0.05_69]] ) elif model_name == "deta-swin-large-o365": A = torch.tensor( [[-8.01_22, -3.57_20, -4.97_17], [-8.15_47, -3.68_86, -4.63_89], [-7.66_10, 
-3.61_94, -5.01_34]] ) A = torch.tensor([[0.25_23, 0.55_49, 0.48_81], [0.77_15, 0.41_49, 0.46_01], [0.55_03, 0.27_53, 0.05_75]] ) assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(UpperCamelCase ) , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(UpperCamelCase ) , atol=1E-4 ) print("Everything ok!" ) if pytorch_dump_folder_path: # Save model and processor logger.info(F"Saving PyTorch model and processor to {pytorch_dump_folder_path}..." ) Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase ) model.save_pretrained(UpperCamelCase ) processor.save_pretrained(UpperCamelCase ) # Push to hub if push_to_hub: print("Pushing model and processor to hub..." ) model.push_to_hub(F"jozhang97/{model_name}" ) processor.push_to_hub(F"jozhang97/{model_name}" ) if __name__ == "__main__": _snake_case : str = argparse.ArgumentParser() parser.add_argument( '--model_name', type=str, default='deta-swin-large', choices=['deta-swin-large', 'deta-swin-large-o365'], help='Name of the model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) _snake_case : Tuple = parser.parse_args() convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
292
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging _snake_case : List[Any] = logging.get_logger(__name__) _snake_case : int = { 'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json', # See all Marian models at https://huggingface.co/models?filter=marian } class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = '''marian''' UpperCamelCase = ['''past_key_values'''] UpperCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self :int , __UpperCamelCase :Any=5_81_01 , __UpperCamelCase :int=None , __UpperCamelCase :Union[str, Any]=10_24 , __UpperCamelCase :Union[str, Any]=12 , __UpperCamelCase :str=40_96 , __UpperCamelCase :int=16 , __UpperCamelCase :int=12 , __UpperCamelCase :Optional[Any]=40_96 , __UpperCamelCase :Optional[Any]=16 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :str=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :Any="gelu" , __UpperCamelCase :Any=10_24 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Optional[Any]=0.0 , __UpperCamelCase :Union[str, Any]=0.0 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :List[str]=5_81_00 , __UpperCamelCase :str=False , __UpperCamelCase :Optional[int]=5_81_00 , __UpperCamelCase :List[Any]=0 , __UpperCamelCase :List[str]=0 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ): A = vocab_size A = decoder_vocab_size or vocab_size A = max_position_embeddings A = d_model A = encoder_ffn_dim A = encoder_layers A = encoder_attention_heads A = decoder_ffn_dim A = decoder_layers A = decoder_attention_heads A = dropout A = 
attention_dropout A = activation_dropout A = activation_function A = init_std A = encoder_layerdrop A = decoder_layerdrop A = use_cache A = encoder_layers A = scale_embedding # scale factor will be sqrt(d_model) if True A = share_encoder_decoder_embeddings super().__init__( pad_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , forced_eos_token_id=__UpperCamelCase , **__UpperCamelCase , ) class _UpperCAmelCase ( lowercase_ ): @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def lowerCamelCase ( self :List[str] ): if self.task in ["default", "seq2seq-lm"]: A = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: A = {0: "batch"} A = {0: "batch", 1: "past_decoder_sequence + sequence"} else: A = {0: "batch", 1: "decoder_sequence"} A = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(__UpperCamelCase , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
A = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: A, A = self.num_layers for i in range(__UpperCamelCase ): A = {0: "batch", 2: "past_sequence + sequence"} A = {0: "batch", 2: "past_sequence + sequence"} else: A = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def lowerCamelCase ( self :List[str] ): if self.task in ["default", "seq2seq-lm"]: A = super().outputs else: A = super(__UpperCamelCase , self ).outputs if self.use_past: A, A = self.num_layers for i in range(__UpperCamelCase ): A = {0: "batch", 2: "past_sequence + sequence"} A = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ): A = self._generate_dummy_inputs_for_encoder_and_decoder( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # Generate decoder inputs A = seq_length if not self.use_past else 1 A = self._generate_dummy_inputs_for_encoder_and_decoder( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) A = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} A = dict(**__UpperCamelCase , **__UpperCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch A, A = common_inputs["input_ids"].shape A = common_inputs["decoder_input_ids"].shape[1] A, A = self.num_attention_heads A = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) A = decoder_seq_length + 3 A = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) A = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase )] , dim=1 ) A = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered A, A = self.num_layers A = min(__UpperCamelCase , __UpperCamelCase ) A = max(__UpperCamelCase , __UpperCamelCase ) - min_num_layers A = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(__UpperCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase ), ) ) # TODO: test this. A = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(__UpperCamelCase , __UpperCamelCase ): common_inputs["past_key_values"].append((torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) ) return common_inputs def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ): A = self._generate_dummy_inputs_for_encoder_and_decoder( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch A, A = common_inputs["input_ids"].shape # Not using the same length for past_key_values A = seqlen + 2 A, A = self.num_layers A, A = self.num_attention_heads A = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) A = common_inputs["attention_mask"].dtype A = torch.cat( [common_inputs["attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase , dtype=__UpperCamelCase )] , dim=1 ) A = [ (torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) for _ in range(__UpperCamelCase ) ] return common_inputs def lowerCamelCase ( self :Tuple , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX A = compute_effective_axis_dimension( __UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX A = tokenizer.num_special_tokens_to_add(__UpperCamelCase ) A = compute_effective_axis_dimension( __UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCamelCase ) # Generate dummy inputs according to compute batch and sequence A = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size A = dict(tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase ) ) return common_inputs def lowerCamelCase ( self :List[Any] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ): if self.task in ["default", "seq2seq-lm"]: A = 
self._generate_dummy_inputs_for_default_and_seqaseq_lm( __UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase ) else: A = self._generate_dummy_inputs_for_causal_lm( __UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase ) return common_inputs def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str] , __UpperCamelCase :str , __UpperCamelCase :str ): if self.task in ["default", "seq2seq-lm"]: A = super()._flatten_past_key_values_(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) else: A = super(__UpperCamelCase , self )._flatten_past_key_values_( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) @property def lowerCamelCase ( self :List[str] ): return 1e-4
292
1
"""simple docstring""" def A__ ( UpperCamelCase = 1_000 ): A = 2**power A = str(UpperCamelCase ) A = list(UpperCamelCase ) A = 0 for i in list_num: sum_of_num += int(UpperCamelCase ) return sum_of_num if __name__ == "__main__": _snake_case : Any = int(input('Enter the power of 2: ').strip()) print('2 ^ ', power, ' = ', 2**power) _snake_case : Tuple = solution(power) print('Sum of the digits is: ', result)
292
"""simple docstring""" # A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. def A__ ( UpperCamelCase ): A = [False] * len(UpperCamelCase ) A = [-1] * len(UpperCamelCase ) def dfs(UpperCamelCase , UpperCamelCase ): A = True A = c for u in graph[v]: if not visited[u]: dfs(UpperCamelCase , 1 - c ) for i in range(len(UpperCamelCase ) ): if not visited[i]: dfs(UpperCamelCase , 0 ) for i in range(len(UpperCamelCase ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph _snake_case : str = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
292
1
"""simple docstring""" from __future__ import annotations class _UpperCAmelCase : def __init__( self :List[str] , __UpperCamelCase :int ): A = order # a_{0} ... a_{k} A = [1.0] + [0.0] * order # b_{0} ... b_{k} A = [1.0] + [0.0] * order # x[n-1] ... x[n-k] A = [0.0] * self.order # y[n-1] ... y[n-k] A = [0.0] * self.order def lowerCamelCase ( self :List[Any] , __UpperCamelCase :list[float] , __UpperCamelCase :list[float] ): if len(__UpperCamelCase ) < self.order: A = [1.0, *a_coeffs] if len(__UpperCamelCase ) != self.order + 1: A = ( f"Expected a_coeffs to have {self.order + 1} elements " f"for {self.order}-order filter, got {len(__UpperCamelCase )}" ) raise ValueError(__UpperCamelCase ) if len(__UpperCamelCase ) != self.order + 1: A = ( f"Expected b_coeffs to have {self.order + 1} elements " f"for {self.order}-order filter, got {len(__UpperCamelCase )}" ) raise ValueError(__UpperCamelCase ) A = a_coeffs A = b_coeffs def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :float ): A = 0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) A = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] A = self.input_history[:-1] A = self.output_history[:-1] A = sample A = result return result
292
"""simple docstring""" from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class _UpperCAmelCase ( lowercase_ ): def __init__( self :int , __UpperCamelCase :Distribution , __UpperCamelCase :Dict=None , __UpperCamelCase :Optional[int]=None , __UpperCamelCase :List[str]=0 ): A = 1.0 if scale is None else scale A = 0.0 if loc is None else loc super().__init__(__UpperCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__UpperCamelCase )] ) @property def lowerCamelCase ( self :Any ): return self.base_dist.mean * self.scale + self.loc @property def lowerCamelCase ( self :Optional[int] ): return self.base_dist.variance * self.scale**2 @property def lowerCamelCase ( self :Dict ): return self.variance.sqrt() class _UpperCAmelCase ( nn.Module ): def __init__( self :Dict , __UpperCamelCase :int , __UpperCamelCase :Dict[str, int] , __UpperCamelCase :Callable[..., Tuple[torch.Tensor]] , **__UpperCamelCase :str ): super().__init__(**__UpperCamelCase ) A = args_dim A = nn.ModuleList([nn.Linear(__UpperCamelCase , __UpperCamelCase ) for dim in args_dim.values()] ) A = domain_map def lowerCamelCase ( self :int , __UpperCamelCase :torch.Tensor ): A = [proj(__UpperCamelCase ) for proj in self.proj] return self.domain_map(*__UpperCamelCase ) class _UpperCAmelCase ( nn.Module ): def __init__( self :Dict , __UpperCamelCase :int ): super().__init__() A = function def lowerCamelCase ( self :List[str] , __UpperCamelCase :Any , *__UpperCamelCase :Any ): return self.function(__UpperCamelCase , *__UpperCamelCase ) class _UpperCAmelCase : UpperCamelCase = 42 UpperCamelCase = 42 UpperCamelCase = 42 def __init__( self :Any , __UpperCamelCase :int = 1 ): A = dim A = {k: dim * self.args_dim[k] for k in self.args_dim} def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Dict ): if self.dim == 1: return 
self.distribution_class(*__UpperCamelCase ) else: return Independent(self.distribution_class(*__UpperCamelCase ) , 1 ) def lowerCamelCase ( self :int , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None , ): A = self._base_distribution(__UpperCamelCase ) if loc is None and scale is None: return distr else: return AffineTransformed(__UpperCamelCase , loc=__UpperCamelCase , scale=__UpperCamelCase , event_dim=self.event_dim ) @property def lowerCamelCase ( self :List[Any] ): return () if self.dim == 1 else (self.dim,) @property def lowerCamelCase ( self :Tuple ): return len(self.event_shape ) @property def lowerCamelCase ( self :int ): return 0.0 def lowerCamelCase ( self :str , __UpperCamelCase :int ): return ParameterProjection( in_features=__UpperCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def lowerCamelCase ( self :List[Any] , *__UpperCamelCase :torch.Tensor ): raise NotImplementedError() @staticmethod def lowerCamelCase ( __UpperCamelCase :torch.Tensor ): return (x + torch.sqrt(torch.square(__UpperCamelCase ) + 4.0 )) / 2.0 class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = {"df": 1, "loc": 1, "scale": 1} UpperCamelCase = StudentT @classmethod def lowerCamelCase ( cls :List[str] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ): A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) A = 2.0 + cls.squareplus(__UpperCamelCase ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = {"loc": 1, "scale": 1} UpperCamelCase = Normal @classmethod def lowerCamelCase ( cls :List[Any] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ): A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class _UpperCAmelCase ( lowercase_ ): 
UpperCamelCase = {"total_count": 1, "logits": 1} UpperCamelCase = NegativeBinomial @classmethod def lowerCamelCase ( cls :str , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ): A = cls.squareplus(__UpperCamelCase ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def lowerCamelCase ( self :Tuple , __UpperCamelCase :List[str] ): A, A = distr_args if self.dim == 1: return self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase ) else: return Independent(self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase ) , 1 ) def lowerCamelCase ( self :List[str] , __UpperCamelCase :str , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None ): A, A = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
292
1
"""simple docstring""" def A__ ( UpperCamelCase = 100 ): A = n * (n + 1) * (2 * n + 1) / 6 A = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F"""{solution() = }""")
292
"""simple docstring""" import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class _UpperCAmelCase : UpperCamelCase = None def lowerCamelCase ( self :List[Any] ): A = self.feature_extraction_class(**self.feat_extract_dict ) A = json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , __UpperCamelCase ) def lowerCamelCase ( self :Dict ): A = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A = os.path.join(__UpperCamelCase , "feat_extract.json" ) feat_extract_first.to_json_file(__UpperCamelCase ) A = self.feature_extraction_class.from_json_file(__UpperCamelCase ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def lowerCamelCase ( self :Dict ): A = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A = feat_extract_first.save_pretrained(__UpperCamelCase )[0] check_json_file_has_correct_format(__UpperCamelCase ) A = self.feature_extraction_class.from_pretrained(__UpperCamelCase ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def lowerCamelCase ( self :Tuple ): A = self.feature_extraction_class() self.assertIsNotNone(__UpperCamelCase )
292
1
"""simple docstring""" from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _snake_case : Dict = logging.get_logger(__name__) class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = ['''pixel_values'''] def __init__( self :Any , __UpperCamelCase :bool = True , __UpperCamelCase :Dict[str, int] = None , __UpperCamelCase :PILImageResampling = PILImageResampling.BICUBIC , __UpperCamelCase :bool = True , __UpperCamelCase :Dict[str, int] = None , __UpperCamelCase :bool = True , __UpperCamelCase :Union[int, float] = 1 / 2_55 , __UpperCamelCase :bool = True , __UpperCamelCase :Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , __UpperCamelCase :Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **__UpperCamelCase :Dict , ): super().__init__(**__UpperCamelCase ) A = size if size is not None else {"shortest_edge": 2_24} A = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase ) A = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24} A = get_size_dict(__UpperCamelCase , param_name="crop_size" ) A = do_resize A = size A = resample A = do_center_crop A = crop_size A = do_rescale A = rescale_factor A = do_normalize A = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN A = image_std if image_std is not None else IMAGENET_DEFAULT_STD def lowerCamelCase ( self :List[str] , __UpperCamelCase :np.ndarray , __UpperCamelCase :Dict[str, int] , __UpperCamelCase :PILImageResampling = PILImageResampling.BICUBIC , __UpperCamelCase :Optional[Union[str, 
ChannelDimension]] = None , **__UpperCamelCase :int , ): A = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: A = int((2_56 / 2_24) * size["shortest_edge"] ) A = get_resize_output_image_size(__UpperCamelCase , size=__UpperCamelCase , default_to_square=__UpperCamelCase ) A = {"height": output_size[0], "width": output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" ) return resize( __UpperCamelCase , size=(size_dict["height"], size_dict["width"]) , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowerCamelCase ( self :Dict , __UpperCamelCase :np.ndarray , __UpperCamelCase :Dict[str, int] , __UpperCamelCase :Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase :Dict , ): A = get_size_dict(__UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f"Size dict must have keys 'height' and 'width'. 
Got {size.keys()}" ) return center_crop(__UpperCamelCase , size=(size["height"], size["width"]) , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowerCamelCase ( self :List[str] , __UpperCamelCase :np.ndarray , __UpperCamelCase :Union[int, float] , __UpperCamelCase :Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase :List[Any] , ): return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowerCamelCase ( self :int , __UpperCamelCase :np.ndarray , __UpperCamelCase :Union[float, List[float]] , __UpperCamelCase :Union[float, List[float]] , __UpperCamelCase :Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase :int , ): return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :ImageInput , __UpperCamelCase :Optional[bool] = None , __UpperCamelCase :Optional[Dict[str, int]] = None , __UpperCamelCase :PILImageResampling = None , __UpperCamelCase :Optional[bool] = None , __UpperCamelCase :Optional[Dict[str, int]] = None , __UpperCamelCase :Optional[bool] = None , __UpperCamelCase :Optional[float] = None , __UpperCamelCase :Optional[bool] = None , __UpperCamelCase :Optional[Union[float, Iterable[float]]] = None , __UpperCamelCase :Optional[Union[float, Iterable[float]]] = None , __UpperCamelCase :Optional[TensorType] = None , __UpperCamelCase :ChannelDimension = ChannelDimension.FIRST , **__UpperCamelCase :List[Any] , ): A = do_resize if do_resize is not None else self.do_resize A = resample if resample is not None else self.resample A = do_center_crop if do_center_crop is not None else self.do_center_crop A = do_rescale if do_rescale is not None else self.do_rescale A = rescale_factor if rescale_factor is not None else self.rescale_factor A = do_normalize if do_normalize is not None else self.do_normalize A = image_mean if image_mean is not 
None else self.image_mean A = image_std if image_std is not None else self.image_std A = size if size is not None else self.size A = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase ) A = crop_size if crop_size is not None else self.crop_size A = get_size_dict(__UpperCamelCase , param_name="crop_size" ) A = make_list_of_images(__UpperCamelCase ) if not valid_images(__UpperCamelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. A = [to_numpy_array(__UpperCamelCase ) for image in images] if do_resize: A = [self.resize(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) for image in images] if do_center_crop: A = [self.center_crop(__UpperCamelCase , __UpperCamelCase ) for image in images] if do_rescale: A = [self.rescale(__UpperCamelCase , __UpperCamelCase ) for image in images] if do_normalize: A = [self.normalize(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) for image in images] A = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images] A = {"pixel_values": images} return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
292
"""simple docstring""" import unittest from transformers import RoFormerTokenizer, RoFormerTokenizerFast from transformers.testing_utils import require_rjieba, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_rjieba @require_tokenizers class _UpperCAmelCase ( lowercase_ , unittest.TestCase ): UpperCamelCase = RoFormerTokenizer UpperCamelCase = RoFormerTokenizerFast UpperCamelCase = True UpperCamelCase = True def lowerCamelCase ( self :List[str] ): super().setUp() def lowerCamelCase ( self :int , **__UpperCamelCase :List[Any] ): return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCamelCase ) def lowerCamelCase ( self :Tuple , **__UpperCamelCase :Optional[int] ): return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCamelCase ) def lowerCamelCase ( self :Any ): A = "永和服装饰品有限公司,今天天气非常好" A = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好" return input_text, output_text def lowerCamelCase ( self :int ): A = self.get_tokenizer() A, A = self.get_chinese_input_output_texts() A = tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , output_text.split() ) A = tokens + [tokenizer.unk_token] A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase ) def lowerCamelCase ( self :str ): A = self.get_rust_tokenizer() A, A = self.get_chinese_input_output_texts() A = tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , output_text.split() ) A = tokens + [tokenizer.unk_token] A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase ) def lowerCamelCase ( self :Any ): pass def lowerCamelCase ( self :Tuple ): pass def lowerCamelCase ( self :List[str] ): pass
292
1
"""simple docstring""" from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class _UpperCAmelCase ( lowercase_ ): def __init__( self :int , __UpperCamelCase :Distribution , __UpperCamelCase :Dict=None , __UpperCamelCase :Optional[int]=None , __UpperCamelCase :List[str]=0 ): A = 1.0 if scale is None else scale A = 0.0 if loc is None else loc super().__init__(__UpperCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__UpperCamelCase )] ) @property def lowerCamelCase ( self :Any ): return self.base_dist.mean * self.scale + self.loc @property def lowerCamelCase ( self :Optional[int] ): return self.base_dist.variance * self.scale**2 @property def lowerCamelCase ( self :Dict ): return self.variance.sqrt() class _UpperCAmelCase ( nn.Module ): def __init__( self :Dict , __UpperCamelCase :int , __UpperCamelCase :Dict[str, int] , __UpperCamelCase :Callable[..., Tuple[torch.Tensor]] , **__UpperCamelCase :str ): super().__init__(**__UpperCamelCase ) A = args_dim A = nn.ModuleList([nn.Linear(__UpperCamelCase , __UpperCamelCase ) for dim in args_dim.values()] ) A = domain_map def lowerCamelCase ( self :int , __UpperCamelCase :torch.Tensor ): A = [proj(__UpperCamelCase ) for proj in self.proj] return self.domain_map(*__UpperCamelCase ) class _UpperCAmelCase ( nn.Module ): def __init__( self :Dict , __UpperCamelCase :int ): super().__init__() A = function def lowerCamelCase ( self :List[str] , __UpperCamelCase :Any , *__UpperCamelCase :Any ): return self.function(__UpperCamelCase , *__UpperCamelCase ) class _UpperCAmelCase : UpperCamelCase = 42 UpperCamelCase = 42 UpperCamelCase = 42 def __init__( self :Any , __UpperCamelCase :int = 1 ): A = dim A = {k: dim * self.args_dim[k] for k in self.args_dim} def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Dict ): if self.dim == 1: return 
self.distribution_class(*__UpperCamelCase ) else: return Independent(self.distribution_class(*__UpperCamelCase ) , 1 ) def lowerCamelCase ( self :int , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None , ): A = self._base_distribution(__UpperCamelCase ) if loc is None and scale is None: return distr else: return AffineTransformed(__UpperCamelCase , loc=__UpperCamelCase , scale=__UpperCamelCase , event_dim=self.event_dim ) @property def lowerCamelCase ( self :List[Any] ): return () if self.dim == 1 else (self.dim,) @property def lowerCamelCase ( self :Tuple ): return len(self.event_shape ) @property def lowerCamelCase ( self :int ): return 0.0 def lowerCamelCase ( self :str , __UpperCamelCase :int ): return ParameterProjection( in_features=__UpperCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def lowerCamelCase ( self :List[Any] , *__UpperCamelCase :torch.Tensor ): raise NotImplementedError() @staticmethod def lowerCamelCase ( __UpperCamelCase :torch.Tensor ): return (x + torch.sqrt(torch.square(__UpperCamelCase ) + 4.0 )) / 2.0 class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = {"df": 1, "loc": 1, "scale": 1} UpperCamelCase = StudentT @classmethod def lowerCamelCase ( cls :List[str] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ): A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) A = 2.0 + cls.squareplus(__UpperCamelCase ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = {"loc": 1, "scale": 1} UpperCamelCase = Normal @classmethod def lowerCamelCase ( cls :List[Any] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ): A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class _UpperCAmelCase ( lowercase_ ): 
UpperCamelCase = {"total_count": 1, "logits": 1} UpperCamelCase = NegativeBinomial @classmethod def lowerCamelCase ( cls :str , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ): A = cls.squareplus(__UpperCamelCase ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def lowerCamelCase ( self :Tuple , __UpperCamelCase :List[str] ): A, A = distr_args if self.dim == 1: return self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase ) else: return Independent(self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase ) , 1 ) def lowerCamelCase ( self :List[str] , __UpperCamelCase :str , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None ): A, A = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
292
"""simple docstring""" def A__ ( UpperCamelCase , UpperCamelCase = False ): if not isinstance(UpperCamelCase , UpperCamelCase ): A = F"Expected string as input, found {type(UpperCamelCase )}" raise ValueError(UpperCamelCase ) if not isinstance(UpperCamelCase , UpperCamelCase ): A = F"Expected boolean as use_pascal parameter, found {type(UpperCamelCase )}" raise ValueError(UpperCamelCase ) A = input_str.split("_" ) A = 0 if use_pascal else 1 A = words[start_index:] A = [word[0].upper() + word[1:] for word in words_to_capitalize] A = "" if use_pascal else words[0] return "".join([initial_word, *capitalized_words] ) if __name__ == "__main__": from doctest import testmod testmod()
292
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _snake_case : Any = logging.get_logger(__name__) _snake_case : Optional[int] = { 'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json', } class _UpperCAmelCase ( lowercase_ , lowercase_ ): UpperCamelCase = '''convnextv2''' def __init__( self :Union[str, Any] , __UpperCamelCase :Optional[int]=3 , __UpperCamelCase :Tuple=4 , __UpperCamelCase :List[str]=4 , __UpperCamelCase :Tuple=None , __UpperCamelCase :Union[str, Any]=None , __UpperCamelCase :Tuple="gelu" , __UpperCamelCase :Any=0.02 , __UpperCamelCase :List[Any]=1e-12 , __UpperCamelCase :List[Any]=0.0 , __UpperCamelCase :Optional[Any]=2_24 , __UpperCamelCase :Dict=None , __UpperCamelCase :str=None , **__UpperCamelCase :Optional[int] , ): super().__init__(**__UpperCamelCase ) A = num_channels A = patch_size A = num_stages A = [96, 1_92, 3_84, 7_68] if hidden_sizes is None else hidden_sizes A = [3, 3, 9, 3] if depths is None else depths A = hidden_act A = initializer_range A = layer_norm_eps A = drop_path_rate A = image_size A = ["stem"] + [f"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )] A, A = get_aligned_output_features_output_indices( out_features=__UpperCamelCase , out_indices=__UpperCamelCase , stage_names=self.stage_names )
292
"""simple docstring""" from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) _snake_case : int = logging.get_logger(__name__) # pylint: disable=invalid-name _snake_case : List[Any] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... 
).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... 
).images\n\n >>> images[0].save("robot_cat.png")\n ```\n' def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase=8 ): A = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 A = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class _UpperCAmelCase ( lowercase_ ): def __init__( self :Any , __UpperCamelCase :UNetaDConditionModel , __UpperCamelCase :DDPMScheduler , __UpperCamelCase :VQModel , ): super().__init__() self.register_modules( unet=__UpperCamelCase , scheduler=__UpperCamelCase , movq=__UpperCamelCase , ) A = 2 ** (len(self.movq.config.block_out_channels ) - 1) def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Tuple , __UpperCamelCase :Dict , __UpperCamelCase :Dict , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] , __UpperCamelCase :List[str] ): if latents is None: A = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=__UpperCamelCase , dtype=__UpperCamelCase ) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" ) A = latents.to(__UpperCamelCase ) A = latents * scheduler.init_noise_sigma return latents def lowerCamelCase ( self :Tuple , __UpperCamelCase :Any=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) A = torch.device(f"cuda:{gpu_id}" ) A = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(__UpperCamelCase , __UpperCamelCase ) def lowerCamelCase ( self :Dict , __UpperCamelCase :int=0 ): if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." 
) A = torch.device(f"cuda:{gpu_id}" ) if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=__UpperCamelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) A = None for cpu_offloaded_model in [self.unet, self.movq]: A, A = cpu_offload_with_hook(__UpperCamelCase , __UpperCamelCase , prev_module_hook=__UpperCamelCase ) # We'll offload the last model manually. A = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def lowerCamelCase ( self :str ): if not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(__UpperCamelCase , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(__UpperCamelCase ) def __call__( self :List[Any] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :torch.FloatTensor , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 1_00 , __UpperCamelCase :float = 4.0 , __UpperCamelCase :int = 1 , __UpperCamelCase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , ): A = self._execution_device A = guidance_scale > 1.0 if isinstance(__UpperCamelCase , __UpperCamelCase ): A = torch.cat(__UpperCamelCase , dim=0 ) if isinstance(__UpperCamelCase , __UpperCamelCase ): A = torch.cat(__UpperCamelCase , dim=0 ) if isinstance(__UpperCamelCase , __UpperCamelCase ): A = torch.cat(__UpperCamelCase , dim=0 ) A = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: A = 
image_embeds.repeat_interleave(__UpperCamelCase , dim=0 ) A = negative_image_embeds.repeat_interleave(__UpperCamelCase , dim=0 ) A = hint.repeat_interleave(__UpperCamelCase , dim=0 ) A = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase ) A = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase ) self.scheduler.set_timesteps(__UpperCamelCase , device=__UpperCamelCase ) A = self.scheduler.timesteps A = self.movq.config.latent_channels A, A = downscale_height_and_width(__UpperCamelCase , __UpperCamelCase , self.movq_scale_factor ) # create initial latent A = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , self.scheduler , ) for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents A = {"image_embeds": image_embeds, "hint": hint} A = self.unet( sample=__UpperCamelCase , timestep=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , added_cond_kwargs=__UpperCamelCase , return_dict=__UpperCamelCase , )[0] if do_classifier_free_guidance: A, A = noise_pred.split(latents.shape[1] , dim=1 ) A, A = noise_pred.chunk(2 ) A, A = variance_pred.chunk(2 ) A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) A = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , "variance_type" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): A, A = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 A = self.scheduler.step( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase , )[0] # post-processing A = self.movq.decode(__UpperCamelCase , force_not_quantize=__UpperCamelCase )["sample"] if 
output_type not in ["pt", "np", "pil"]: raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" ) if output_type in ["np", "pil"]: A = image * 0.5 + 0.5 A = image.clamp(0 , 1 ) A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": A = self.numpy_to_pil(__UpperCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__UpperCamelCase )
292
1
"""simple docstring""" import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def A__ ( UpperCamelCase ): A = SwinConfig() A = swin_name.split("_" ) A = name_split[1] A = int(name_split[4] ) A = int(name_split[3][-1] ) if model_size == "tiny": A = 96 A = (2, 2, 6, 2) A = (3, 6, 12, 24) elif model_size == "small": A = 96 A = (2, 2, 18, 2) A = (3, 6, 12, 24) elif model_size == "base": A = 128 A = (2, 2, 18, 2) A = (4, 8, 16, 32) else: A = 192 A = (2, 2, 18, 2) A = (6, 12, 24, 48) if "in22k" in swin_name: A = 21_841 else: A = 1_000 A = "huggingface/label-files" A = "imagenet-1k-id2label.json" A = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="dataset" ) , "r" ) ) A = {int(UpperCamelCase ): v for k, v in idalabel.items()} A = idalabel A = {v: k for k, v in idalabel.items()} A = img_size A = num_classes A = embed_dim A = depths A = num_heads A = window_size return config def A__ ( UpperCamelCase ): if "patch_embed.proj" in name: A = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: A = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: A = "encoder." + name if "attn.proj" in name: A = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: A = name.replace("attn" , "attention.self" ) if "norm1" in name: A = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: A = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: A = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: A = name.replace("mlp.fc2" , "output.dense" ) if name == "norm.weight": A = "layernorm.weight" if name == "norm.bias": A = "layernorm.bias" if "head" in name: A = name.replace("head" , "classifier" ) else: A = "swin." 
+ name return name def A__ ( UpperCamelCase , UpperCamelCase ): for key in orig_state_dict.copy().keys(): A = orig_state_dict.pop(UpperCamelCase ) if "mask" in key: continue elif "qkv" in key: A = key.split("." ) A = int(key_split[1] ) A = int(key_split[3] ) A = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: A = val[:dim, :] A = val[ dim : dim * 2, : ] A = val[-dim:, :] else: A = val[ :dim ] A = val[ dim : dim * 2 ] A = val[ -dim: ] else: A = val return orig_state_dict def A__ ( UpperCamelCase , UpperCamelCase ): A = timm.create_model(UpperCamelCase , pretrained=UpperCamelCase ) timm_model.eval() A = get_swin_config(UpperCamelCase ) A = SwinForImageClassification(UpperCamelCase ) model.eval() A = convert_state_dict(timm_model.state_dict() , UpperCamelCase ) model.load_state_dict(UpperCamelCase ) A = "http://images.cocodataset.org/val2017/000000039769.jpg" A = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_" , "-" ) ) ) A = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ) A = image_processor(images=UpperCamelCase , return_tensors="pt" ) A = timm_model(inputs["pixel_values"] ) A = model(**UpperCamelCase ).logits assert torch.allclose(UpperCamelCase , UpperCamelCase , atol=1E-3 ) print(F"Saving model {swin_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(UpperCamelCase ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(UpperCamelCase ) if __name__ == "__main__": _snake_case : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swin_name', default='swin_tiny_patch4_window7_224', type=str, help='Name of the Swin timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' 
) _snake_case : Optional[int] = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
292
"""simple docstring""" import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _UpperCAmelCase : def __init__( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str]=13 , __UpperCamelCase :Any=30 , __UpperCamelCase :int=2 , __UpperCamelCase :Union[str, Any]=3 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :List[str]=32 , __UpperCamelCase :List[Any]=5 , __UpperCamelCase :Dict=4 , __UpperCamelCase :List[str]=37 , __UpperCamelCase :str="gelu" , __UpperCamelCase :Union[str, Any]=0.1 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Tuple=10 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :int=None , ): A = parent A = batch_size A = image_size A = patch_size A = num_channels A = is_training A = use_labels A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = type_sequence_label_size A = initializer_range A = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A = (image_size // patch_size) ** 2 A = num_patches + 1 def lowerCamelCase ( self :Any ): A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A = None if 
self.use_labels: A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self :Union[str, Any] ): return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def lowerCamelCase ( self :Dict , __UpperCamelCase :Dict , __UpperCamelCase :Any , __UpperCamelCase :Any ): A = ViTMSNModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Optional[Any] ): A = self.type_sequence_label_size A = ViTMSNForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = model(__UpperCamelCase , labels=__UpperCamelCase ) print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" ) print("Labels: {labels}" ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A = 1 A = ViTMSNForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase ( self :Optional[Any] ): A = self.prepare_config_and_inputs() A, A, A = config_and_inputs A = {"pixel_values": pixel_values} return config, inputs_dict 
@require_torch class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () UpperCamelCase = ( {'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification} if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def lowerCamelCase ( self :Optional[int] ): A = ViTMSNModelTester(self ) A = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 ) def lowerCamelCase ( self :Any ): self.config_tester.run_common_tests() @unittest.skip(reason="ViTMSN does not use inputs_embeds" ) def lowerCamelCase ( self :Union[str, Any] ): pass def lowerCamelCase ( self :int ): A, A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(__UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) ) def lowerCamelCase ( self :Tuple ): A, A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(__UpperCamelCase ) A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A = [*signature.parameters.keys()] A = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def lowerCamelCase ( self :List[str] ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def lowerCamelCase ( self :Dict ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase ) @slow def lowerCamelCase ( self :List[Any] ): for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = 
ViTMSNModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def A__ ( ): A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _UpperCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase ( self :Union[str, Any] ): return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None @slow def lowerCamelCase ( self :Any ): torch.manual_seed(2 ) A = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(__UpperCamelCase ) A = self.default_image_processor A = prepare_img() A = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): A = model(**__UpperCamelCase ) # verify the logits A = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __UpperCamelCase ) A = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
292
1
"""simple docstring""" from __future__ import annotations def A__ ( UpperCamelCase ): # This function is recursive A = len(UpperCamelCase ) # If the array contains only one element, we return it (it's the stop condition of # recursion) if array_length <= 1: return array # Else A = array[0] A = False A = 1 A = [] while not is_found and i < array_length: if array[i] < pivot: A = True A = [element for element in array[i:] if element >= array[i]] A = longest_subsequence(UpperCamelCase ) if len(UpperCamelCase ) > len(UpperCamelCase ): A = temp_array else: i += 1 A = [element for element in array[1:] if element >= pivot] A = [pivot, *longest_subsequence(UpperCamelCase )] if len(UpperCamelCase ) > len(UpperCamelCase ): return temp_array else: return longest_subseq if __name__ == "__main__": import doctest doctest.testmod()
292
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case : Optional[int] = logging.get_logger(__name__) _snake_case : Optional[int] = { 'google/vivit-b-16x2-kinetics400': ( 'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json' ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = '''vivit''' def __init__( self :Optional[Any] , __UpperCamelCase :Dict=2_24 , __UpperCamelCase :int=32 , __UpperCamelCase :Union[str, Any]=[2, 16, 16] , __UpperCamelCase :Optional[Any]=3 , __UpperCamelCase :Optional[Any]=7_68 , __UpperCamelCase :Any=12 , __UpperCamelCase :List[str]=12 , __UpperCamelCase :List[str]=30_72 , __UpperCamelCase :Any="gelu_fast" , __UpperCamelCase :List[Any]=0.0 , __UpperCamelCase :str=0.0 , __UpperCamelCase :Dict=0.02 , __UpperCamelCase :Optional[Any]=1e-06 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ): A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = num_frames A = tubelet_size A = num_channels A = qkv_bias super().__init__(**__UpperCamelCase )
292
1
"""simple docstring""" from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def A__ ( UpperCamelCase ): return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) _snake_case : str = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n' class _UpperCAmelCase ( lowercase_ ): @staticmethod def lowerCamelCase ( __UpperCamelCase :ArgumentParser ): A = parser.add_parser( "convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , ) train_parser.add_argument("--model_type" , type=__UpperCamelCase , required=__UpperCamelCase , help="Model's type." ) train_parser.add_argument( "--tf_checkpoint" , type=__UpperCamelCase , required=__UpperCamelCase , help="TensorFlow checkpoint path or folder." ) train_parser.add_argument( "--pytorch_dump_output" , type=__UpperCamelCase , required=__UpperCamelCase , help="Path to the PyTorch saved model output." ) train_parser.add_argument("--config" , type=__UpperCamelCase , default="" , help="Configuration file path or folder." ) train_parser.add_argument( "--finetuning_task_name" , type=__UpperCamelCase , default=__UpperCamelCase , help="Optional fine-tuning task name if the TF model was a finetuned model." 
, ) train_parser.set_defaults(func=__UpperCamelCase ) def __init__( self :Optional[Any] , __UpperCamelCase :str , __UpperCamelCase :str , __UpperCamelCase :str , __UpperCamelCase :str , __UpperCamelCase :str , *__UpperCamelCase :List[Any] , ): A = logging.get_logger("transformers-cli/converting" ) self._logger.info(f"Loading model {model_type}" ) A = model_type A = tf_checkpoint A = pytorch_dump_output A = config A = finetuning_task_name def lowerCamelCase ( self :Tuple ): if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__UpperCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__UpperCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__UpperCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(__UpperCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from 
..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__UpperCamelCase ) if "ckpt" in self._tf_checkpoint.lower(): A = self._tf_checkpoint A = "" else: A = self._tf_checkpoint A = "" convert_transfo_xl_checkpoint_to_pytorch( __UpperCamelCase , self._config , self._pytorch_dump_output , __UpperCamelCase ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__UpperCamelCase ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__UpperCamelCase ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) else: raise ValueError( "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
292
"""Tests for the ONNX Stable Diffusion img2img pipeline (fast dummy checks + nightly GPU runs).

NOTE(review): the incoming source had every method renamed to the colliding identifier
``lowerCamelCase`` and the mixin base replaced by the undefined name ``lowercase_`` — the
class could not even be created. Names below are restored from the internal call sites
(``self.get_dummy_inputs()``, ``self.hub_checkpoint``, ``self.gpu_provider``,
``self.gpu_options``) and the otherwise-unused ``OnnxPipelineTesterMixin`` import.
Test-method names are reconstructed — confirm against upstream diffusers.
"""

import random
import unittest

import numpy as np

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionImgaImgPipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionImgaImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # Tiny checkpoint so the fast tests run on CPU in seconds.
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        """Build a deterministic (seeded) set of pipeline inputs for the fast tests."""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default(self):
        """Default scheduler: output shape and a reference slice of pixels."""
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImgaImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        """onnxruntime CUDA execution-provider tuple with a capped memory arena."""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # Memory-pattern optimization is disabled for reproducibility on GPU.
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
292
1
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin _snake_case : List[Any] = get_tests_dir('fixtures/test_sentencepiece.model') _snake_case : Dict = {'target_lang': 'fi', 'source_lang': 'en'} _snake_case : List[str] = '>>zh<<' _snake_case : List[Any] = 'Helsinki-NLP/' if is_torch_available(): _snake_case : int = 'pt' elif is_tf_available(): _snake_case : Tuple = 'tf' else: _snake_case : str = 'jax' @require_sentencepiece class _UpperCAmelCase ( lowercase_ , unittest.TestCase ): UpperCamelCase = MarianTokenizer UpperCamelCase = False UpperCamelCase = True def lowerCamelCase ( self :Any ): super().setUp() A = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] A = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) ) A = Path(self.tmpdirname ) save_json(__UpperCamelCase , save_dir / VOCAB_FILES_NAMES["vocab"] ) save_json(__UpperCamelCase , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(__UpperCamelCase , save_dir / VOCAB_FILES_NAMES["source_spm"] ) copyfile(__UpperCamelCase , save_dir / VOCAB_FILES_NAMES["target_spm"] ) A = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase ( self :Tuple , **__UpperCamelCase :Any ): return MarianTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Optional[int] ): return ( "This is a test", "This is a test", ) def lowerCamelCase ( self 
:Optional[Any] ): A = "</s>" A = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase ) def lowerCamelCase ( self :Any ): A = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<pad>" ) self.assertEqual(len(__UpperCamelCase ) , 9 ) def lowerCamelCase ( self :Optional[int] ): self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def lowerCamelCase ( self :List[str] ): A = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de" ) A = en_de_tokenizer(["I am a small frog"] , return_tensors=__UpperCamelCase ) self.assertIsInstance(__UpperCamelCase , __UpperCamelCase ) A = [38, 1_21, 14, 6_97, 3_88_48, 0] self.assertListEqual(__UpperCamelCase , batch.input_ids[0] ) A = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(__UpperCamelCase ) A = [x.name for x in Path(__UpperCamelCase ).glob("*" )] self.assertIn("source.spm" , __UpperCamelCase ) MarianTokenizer.from_pretrained(__UpperCamelCase ) def lowerCamelCase ( self :Optional[Any] ): A = self.get_tokenizer() A = tok( ["I am a small frog" * 10_00, "I am a small frog"] , padding=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors=__UpperCamelCase ) self.assertIsInstance(__UpperCamelCase , __UpperCamelCase ) self.assertEqual(batch.input_ids.shape , (2, 5_12) ) def lowerCamelCase ( self :Any ): A = self.get_tokenizer() A = tok(["I am a tiny frog", "I am a small frog"] , padding=__UpperCamelCase , return_tensors=__UpperCamelCase ) self.assertIsInstance(__UpperCamelCase , __UpperCamelCase ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def lowerCamelCase ( self :str ): # fmt: off A = {"input_ids": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 
75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 
5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCamelCase , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , ) def lowerCamelCase ( self :str ): A = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" ) A = "Tämä on testi" A = "This is a test" A = [76, 7, 20_47, 2] A = [69, 12, 11, 9_40, 2] A = tokenizer(__UpperCamelCase ).input_ids self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) A = tokenizer(text_target=__UpperCamelCase ).input_ids self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) A = tokenizer.decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase )
292
"""Pascal's triangle: printing, plain generation, and a symmetry-optimized generator.

NOTE(review): in the incoming source every function had been renamed to the colliding
identifier ``A__`` while the call sites kept the original names, so nothing resolved;
the names below are restored from those call sites.
"""


def print_pascal_triangle(num_rows: int) -> None:
    """Print `num_rows` rows of Pascal's triangle, centred with leading spaces."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Left padding so the triangle is centred.
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Row values, space-separated; no trailing space after the last one.
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Return the first `num_rows` rows of Pascal's triangle.

    Raises:
        TypeError: if `num_rows` is not an int.
        ValueError: if `num_rows` is negative.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """Build row `current_row_idx` from the rows already present in `triangle`."""
    current_row = [-1] * (current_row_idx + 1)
    # First and last elements of every row are 1.
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    """Set one interior element as the sum of the two elements above it."""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Return the first `num_rows` rows, computing only half of each row.

    Rows are symmetric, so only the distinct left half is summed and the right
    half is produced by mirroring it.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    result = [[1]]
    for row_index in range(1, num_rows):
        # Pad the previous row with zeros so adjacent pairs can be summed directly.
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Number of distinct elements in a row (middle element counted once).
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Time both generators for a few input sizes (simple timeit harness)."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
292
1
"""Lazy init for the GPT-BigCode model package.

NOTE(review): the incoming source bound the import structure to throwaway names
(``_snake_case``) while ``_LazyModule`` referenced the undefined ``_import_structure``,
and the torch-gated modeling exports were never attached — restored to the standard
transformers lazy-module pattern.
"""

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


# Maps submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: only the configuration is exposed.
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
292
"""Lempel–Ziv–Welch style bit-string decompression of a compressed file.

NOTE(review): in the incoming source every function had been renamed to the colliding
identifier ``A__`` and local reassignments had been flattened to dead ``A = …`` stores
(e.g. ``remove_prefix`` returned its input unchanged); names and reassignments are
restored from the call sites.
"""

import math
import sys


def read_file_binary(file_path: str) -> str:
    """Read the file at `file_path` as bytes and return them as one '0'/'1' string.

    Exits the process with a message if the file cannot be opened.
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = f"{dat:08b}"
                result += curr_byte
            return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress `data_bits` with the LZW scheme and return the decoded bit string."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        # When the code length grows (lexicon size hits a power of two),
        # every existing key gains a leading zero.
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Pack the '0'/'1' string `to_write` into bytes and write them to `file_path`.

    Exits the process with a message if the file cannot be opened.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            # Pad the final byte with a 1-then-zeros marker.
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            # NOTE(review): the [:-1] slice drops the final (padding) byte —
            # looks suspicious given the marker just appended; confirm against
            # the matching compressor before changing.
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strip the size prefix the compressed file carries and return the payload."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    # Remove the unary length marker, then the encoded size itself.
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Read `source_path`, decompress its contents, and write to `destination_path`."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
292
1
"""Processor class for TVLT: bundles a TVLT image processor and feature extractor.

NOTE(review): the incoming source had the base class as the undefined name
``lowercase_``, the three ProcessorMixin class attributes collapsed onto one colliding
name, and all six ``__call__`` parameters named identically (a SyntaxError). Names are
restored from the body's references and the ProcessorMixin contract; the class name
``TvltProcessor`` is reconstructed from the docstring strings — confirm callers.
"""

from ...processing_utils import ProcessorMixin


class TvltProcessor(ProcessorMixin):
    r"""
    Wraps a TVLT image processor and a TVLT feature extractor into a single processor.

    Args:
        image_processor: an instance of `TvltImageProcessor`.
        feature_extractor: an instance of `TvltFeatureExtractor`.
    """

    # Required by ProcessorMixin: which sub-processors exist and their classes.
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        """
        Forward `images` (and optionally `images_mixed`) to the image processor and
        `audio` to the feature extractor, merging all outputs into one dict.

        Raises:
            ValueError: if neither `images` nor `audio` is given.
        """
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        # Union of both sub-processors' input names, order-preserving and de-duplicated.
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
292
"""A min-heap of named Node values with O(1) index lookup and decrease-key.

NOTE(review): in the incoming source every method had been renamed to the colliding
identifier ``lowerCamelCase`` while call sites kept the original names
(``self.build_heap``, ``self.sift_down``, …), so the class could not work; the names
below are restored from those call sites.
"""


class Node:
    """A named value stored in the heap; ordered by `val`."""

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        # Heap ordering compares values only; `>` falls back to the reflected `<`.
        return self.val < other.val


class MinHeap:
    """Array-backed binary min-heap.

    Maintains two auxiliary maps:
    - `idx_of_element`: Node -> its index in the heap array (for decrease_key).
    - `heap_dict`: node name -> current value (for `heap[name]` lookups).
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        """Heapify `array` in place (bottom-up) and return it."""
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        # Sift down every internal node, last parent first.
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        """Move array[idx] down until both children are larger."""
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        """Move self.heap[idx] up while it is smaller than its parent."""
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            (
                self.idx_of_element[self.heap[p]],
                self.idx_of_element[self.heap[idx]],
            ) = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        """Return (without removing) the minimum node."""
        return self.heap[0]

    def remove(self):
        """Remove and return the minimum node."""
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        (
            self.idx_of_element[self.heap[0]],
            self.idx_of_element[self.heap[-1]],
        ) = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        """Add `node` to the heap."""
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        """Lower `node`'s value to `new_value` and restore the heap property."""
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
292
1
"""simple docstring""" from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _UpperCAmelCase : UpperCamelCase = PegasusConfig UpperCamelCase = {} UpperCamelCase = '''gelu''' def __init__( self :Union[str, Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :str=13 , __UpperCamelCase :List[Any]=7 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :List[Any]=False , __UpperCamelCase :Any=99 , __UpperCamelCase :Tuple=32 , __UpperCamelCase :Optional[int]=2 , __UpperCamelCase :Optional[Any]=4 , __UpperCamelCase :Tuple=37 , __UpperCamelCase :Optional[Any]=0.1 , __UpperCamelCase :Tuple=0.1 , __UpperCamelCase :Optional[int]=40 , __UpperCamelCase :Tuple=2 , __UpperCamelCase :Dict=1 , __UpperCamelCase :Any=0 , ): A = parent A = batch_size A = seq_length A = is_training A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = eos_token_id A = pad_token_id A = bos_token_id def lowerCamelCase ( self :Tuple ): A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A = tf.concat([input_ids, eos_tensor] , axis=1 ) A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , 
encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) A = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) return config, inputs_dict def lowerCamelCase ( self :str , __UpperCamelCase :str , __UpperCamelCase :Union[str, Any] ): A = TFPegasusModel(config=__UpperCamelCase ).get_decoder() A = inputs_dict["input_ids"] A = input_ids[:1, :] A = inputs_dict["attention_mask"][:1, :] A = inputs_dict["head_mask"] A = 1 # first forward pass A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , head_mask=__UpperCamelCase , use_cache=__UpperCamelCase ) A, A = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A = ids_tensor((self.batch_size, 3) , config.vocab_size ) A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A = tf.concat([input_ids, next_tokens] , axis=-1 ) A = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0] A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A = output_from_no_past[:, -3:, random_slice_idx] A = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__UpperCamelCase , __UpperCamelCase , rtol=1e-3 ) def A__ ( 
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , ): if attention_mask is None: A = tf.cast(tf.math.not_equal(UpperCamelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: A = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: A = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): UpperCamelCase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () UpperCamelCase = (TFPegasusForConditionalGeneration,) if is_tf_available() else () UpperCamelCase = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) UpperCamelCase = True UpperCamelCase = False UpperCamelCase = False def lowerCamelCase ( self :int ): A = TFPegasusModelTester(self ) A = ConfigTester(self , config_class=__UpperCamelCase ) def lowerCamelCase ( self :Dict ): self.config_tester.run_common_tests() def lowerCamelCase ( self :Any ): A = self.model_tester.prepare_config_and_inputs_for_common() 
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCamelCase ) @require_sentencepiece @require_tokenizers @require_tf class _UpperCAmelCase ( unittest.TestCase ): UpperCamelCase = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for 
their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] UpperCamelCase = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers UpperCamelCase = '''google/pegasus-xsum''' @cached_property def lowerCamelCase ( self :Any ): return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCamelCase ( self :Dict ): A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCamelCase ( self :str , **__UpperCamelCase :str ): A = self.translate_src_text(**__UpperCamelCase ) assert self.expected_text == generated_words def lowerCamelCase ( self :Any , **__UpperCamelCase :List[str] ): A = self.tokenizer(self.src_text , **__UpperCamelCase , padding=__UpperCamelCase , return_tensors="tf" ) A = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCamelCase , ) A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCamelCase ) return generated_words @slow def lowerCamelCase ( self :Union[str, Any] ): self._assert_generated_batch_equal_expected()
292
"""simple docstring""" from __future__ import annotations _snake_case : str = [] def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): for i in range(len(UpperCamelCase ) ): if board[row][i] == 1: return False for i in range(len(UpperCamelCase ) ): if board[i][column] == 1: return False for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , -1 , -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , len(UpperCamelCase ) ) ): if board[i][j] == 1: return False return True def A__ ( UpperCamelCase , UpperCamelCase ): if row >= len(UpperCamelCase ): solution.append(UpperCamelCase ) printboard(UpperCamelCase ) print() return True for i in range(len(UpperCamelCase ) ): if is_safe(UpperCamelCase , UpperCamelCase , UpperCamelCase ): A = 1 solve(UpperCamelCase , row + 1 ) A = 0 return False def A__ ( UpperCamelCase ): for i in range(len(UpperCamelCase ) ): for j in range(len(UpperCamelCase ) ): if board[i][j] == 1: print("Q" , end=" " ) else: print("." , end=" " ) print() # n=int(input("The no. of queens")) _snake_case : List[str] = 8 _snake_case : List[str] = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print('The total no. of solutions are :', len(solution))
292
1
"""simple docstring""" from argparse import ArgumentParser from .add_new_model import AddNewModelCommand from .add_new_model_like import AddNewModelLikeCommand from .convert import ConvertCommand from .download import DownloadCommand from .env import EnvironmentCommand from .lfs import LfsCommands from .pt_to_tf import PTtoTFCommand from .run import RunCommand from .serving import ServeCommand from .user import UserCommands def A__ ( ): A = ArgumentParser("Transformers CLI tool" , usage="transformers-cli <command> [<args>]" ) A = parser.add_subparsers(help="transformers-cli command helpers" ) # Register commands ConvertCommand.register_subcommand(UpperCamelCase ) DownloadCommand.register_subcommand(UpperCamelCase ) EnvironmentCommand.register_subcommand(UpperCamelCase ) RunCommand.register_subcommand(UpperCamelCase ) ServeCommand.register_subcommand(UpperCamelCase ) UserCommands.register_subcommand(UpperCamelCase ) AddNewModelCommand.register_subcommand(UpperCamelCase ) AddNewModelLikeCommand.register_subcommand(UpperCamelCase ) LfsCommands.register_subcommand(UpperCamelCase ) PTtoTFCommand.register_subcommand(UpperCamelCase ) # Let's go A = parser.parse_args() if not hasattr(UpperCamelCase , "func" ): parser.print_help() exit(1 ) # Run A = args.func(UpperCamelCase ) service.run() if __name__ == "__main__": main()
292
"""simple docstring""" import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class _UpperCAmelCase : @staticmethod def lowerCamelCase ( *__UpperCamelCase :List[Any] , **__UpperCamelCase :List[Any] ): pass def A__ ( UpperCamelCase ): A = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class _UpperCAmelCase ( unittest.TestCase ): UpperCamelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] ): A = DepthEstimationPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase ( self :Dict , __UpperCamelCase :Optional[int] , __UpperCamelCase :Optional[Any] ): A = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" ) self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , __UpperCamelCase ) import datasets A = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" ) A = depth_estimator( [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] ) self.assertEqual( [ {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), 
"depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, ] , __UpperCamelCase , ) @require_tf @unittest.skip("Depth estimation is not implemented in TF" ) def lowerCamelCase ( self :Optional[Any] ): pass @slow @require_torch def lowerCamelCase ( self :Optional[Any] ): A = "Intel/dpt-large" A = pipeline("depth-estimation" , model=__UpperCamelCase ) A = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" ) A = hashimage(outputs["depth"] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 ) @require_torch def lowerCamelCase ( self :Optional[Any] ): # This is highly irregular to have no small tests. self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
292
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case : str = logging.get_logger(__name__) _snake_case : Any = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = '''cvt''' def __init__( self :List[Any] , __UpperCamelCase :str=3 , __UpperCamelCase :Optional[Any]=[7, 3, 3] , __UpperCamelCase :Optional[Any]=[4, 2, 2] , __UpperCamelCase :List[str]=[2, 1, 1] , __UpperCamelCase :List[str]=[64, 1_92, 3_84] , __UpperCamelCase :int=[1, 3, 6] , __UpperCamelCase :Optional[int]=[1, 2, 10] , __UpperCamelCase :List[Any]=[4.0, 4.0, 4.0] , __UpperCamelCase :Optional[int]=[0.0, 0.0, 0.0] , __UpperCamelCase :Any=[0.0, 0.0, 0.0] , __UpperCamelCase :Dict=[0.0, 0.0, 0.1] , __UpperCamelCase :Optional[int]=[True, True, True] , __UpperCamelCase :Union[str, Any]=[False, False, True] , __UpperCamelCase :int=["dw_bn", "dw_bn", "dw_bn"] , __UpperCamelCase :Any=[3, 3, 3] , __UpperCamelCase :Optional[Any]=[1, 1, 1] , __UpperCamelCase :List[Any]=[2, 2, 2] , __UpperCamelCase :Any=[1, 1, 1] , __UpperCamelCase :Optional[int]=[1, 1, 1] , __UpperCamelCase :Dict=0.02 , __UpperCamelCase :Optional[int]=1e-12 , **__UpperCamelCase :Union[str, Any] , ): super().__init__(**__UpperCamelCase ) A = num_channels A = patch_sizes A = patch_stride A = patch_padding A = embed_dim A = num_heads A = depth A = mlp_ratio A = attention_drop_rate A = drop_rate A = drop_path_rate A = qkv_bias A = cls_token A = qkv_projection_method A = kernel_qkv A = padding_kv A = stride_kv A = padding_q A = stride_q A = initializer_range A = layer_norm_eps
292
"""simple docstring""" from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _UpperCAmelCase : UpperCamelCase = PegasusConfig UpperCamelCase = {} UpperCamelCase = '''gelu''' def __init__( self :Union[str, Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :str=13 , __UpperCamelCase :List[Any]=7 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :List[Any]=False , __UpperCamelCase :Any=99 , __UpperCamelCase :Tuple=32 , __UpperCamelCase :Optional[int]=2 , __UpperCamelCase :Optional[Any]=4 , __UpperCamelCase :Tuple=37 , __UpperCamelCase :Optional[Any]=0.1 , __UpperCamelCase :Tuple=0.1 , __UpperCamelCase :Optional[int]=40 , __UpperCamelCase :Tuple=2 , __UpperCamelCase :Dict=1 , __UpperCamelCase :Any=0 , ): A = parent A = batch_size A = seq_length A = is_training A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = eos_token_id A = pad_token_id A = bos_token_id def lowerCamelCase ( self :Tuple ): A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A = tf.concat([input_ids, eos_tensor] , axis=1 ) A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , 
encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) A = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) return config, inputs_dict def lowerCamelCase ( self :str , __UpperCamelCase :str , __UpperCamelCase :Union[str, Any] ): A = TFPegasusModel(config=__UpperCamelCase ).get_decoder() A = inputs_dict["input_ids"] A = input_ids[:1, :] A = inputs_dict["attention_mask"][:1, :] A = inputs_dict["head_mask"] A = 1 # first forward pass A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , head_mask=__UpperCamelCase , use_cache=__UpperCamelCase ) A, A = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A = ids_tensor((self.batch_size, 3) , config.vocab_size ) A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A = tf.concat([input_ids, next_tokens] , axis=-1 ) A = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0] A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A = output_from_no_past[:, -3:, random_slice_idx] A = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__UpperCamelCase , __UpperCamelCase , rtol=1e-3 ) def A__ ( 
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , ): if attention_mask is None: A = tf.cast(tf.math.not_equal(UpperCamelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: A = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: A = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): UpperCamelCase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () UpperCamelCase = (TFPegasusForConditionalGeneration,) if is_tf_available() else () UpperCamelCase = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) UpperCamelCase = True UpperCamelCase = False UpperCamelCase = False def lowerCamelCase ( self :int ): A = TFPegasusModelTester(self ) A = ConfigTester(self , config_class=__UpperCamelCase ) def lowerCamelCase ( self :Dict ): self.config_tester.run_common_tests() def lowerCamelCase ( self :Any ): A = self.model_tester.prepare_config_and_inputs_for_common() 
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCamelCase ) @require_sentencepiece @require_tokenizers @require_tf class _UpperCAmelCase ( unittest.TestCase ): UpperCamelCase = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for 
their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] UpperCamelCase = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers UpperCamelCase = '''google/pegasus-xsum''' @cached_property def lowerCamelCase ( self :Any ): return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCamelCase ( self :Dict ): A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCamelCase ( self :str , **__UpperCamelCase :str ): A = self.translate_src_text(**__UpperCamelCase ) assert self.expected_text == generated_words def lowerCamelCase ( self :Any , **__UpperCamelCase :List[str] ): A = self.tokenizer(self.src_text , **__UpperCamelCase , padding=__UpperCamelCase , return_tensors="tf" ) A = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCamelCase , ) A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCamelCase ) return generated_words @slow def lowerCamelCase ( self :Union[str, Any] ): self._assert_generated_batch_equal_expected()
292
1
"""simple docstring""" def A__ ( UpperCamelCase ): if upper_limit < 0: raise ValueError("Limit for the Catalan sequence must be ≥ 0" ) A = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 A = 1 if upper_limit > 0: A = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(UpperCamelCase ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print('\n********* Catalan Numbers Using Dynamic Programming ************\n') print('\n*** Enter -1 at any time to quit ***') print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='') try: while True: _snake_case : Union[str, Any] = int(input().strip()) if N < 0: print('\n********* Goodbye!! ************') break else: print(F"""The Catalan numbers from 0 through {N} are:""") print(catalan_numbers(N)) print('Try another upper limit for the sequence: ', end='') except (NameError, ValueError): print('\n********* Invalid input, goodbye! ************\n') import doctest doctest.testmod()
292
"""simple docstring""" from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def A__ ( UpperCamelCase = "laptop" ): A = F"https://www.amazon.in/laptop/s?k={product}" A = { "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36", "Accept-Language": "en-US, en;q=0.5", } A = BeautifulSoup(requests.get(UpperCamelCase , headers=UpperCamelCase ).text ) # Initialize a Pandas dataframe with the column titles A = DataFrame( columns=[ "Product Title", "Product Link", "Current Price of the product", "Product Rating", "MRP of the product", "Discount", ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( "div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ): try: A = item.ha.text A = "https://www.amazon.in/" + item.ha.a["href"] A = item.find("span" , attrs={"class": "a-offscreen"} ).text try: A = item.find("span" , attrs={"class": "a-icon-alt"} ).text except AttributeError: A = "Not available" try: A = ( "₹" + item.find( "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1] ) except AttributeError: A = "" try: A = float( ( ( float(product_mrp.strip("₹" ).replace("," , "" ) ) - float(product_price.strip("₹" ).replace("," , "" ) ) ) / float(product_mrp.strip("₹" ).replace("," , "" ) ) ) * 100 ) except ValueError: A = float("nan" ) except AttributeError: pass A = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] A = " " A = " " data_frame.index += 1 return data_frame if __name__ == "__main__": _snake_case : Optional[int] = 'headphones' get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
292
1
"""simple docstring""" import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() _snake_case : Any = logging.get_logger('transformers.models.encodec') _snake_case : int = { 'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited', 'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size', 'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed', 'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg', } _snake_case : Tuple = { 'encoder.model.0.conv.conv': 'encoder.layers.0.conv', 'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv', 'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv', 'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv', 'encoder.model.3.conv.conv': 'encoder.layers.3.conv', 'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv', 'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv', 'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv', 'encoder.model.6.conv.conv': 'encoder.layers.6.conv', 'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv', 'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv', 'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv', 'encoder.model.9.conv.conv': 'encoder.layers.9.conv', 'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv', 'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv', 'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv', 'encoder.model.12.conv.conv': 
'encoder.layers.12.conv', 'encoder.model.13.lstm': 'encoder.layers.13.lstm', 'encoder.model.15.conv.conv': 'encoder.layers.15.conv', } _snake_case : List[str] = { 'encoder.model.0.conv.norm': 'encoder.layers.0.norm', 'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm', 'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm', 'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm', 'encoder.model.3.conv.norm': 'encoder.layers.3.norm', 'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm', 'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm', 'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm', 'encoder.model.6.conv.norm': 'encoder.layers.6.norm', 'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm', 'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm', 'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm', 'encoder.model.9.conv.norm': 'encoder.layers.9.norm', 'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm', 'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm', 'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm', 'encoder.model.12.conv.norm': 'encoder.layers.12.norm', 'encoder.model.15.conv.norm': 'encoder.layers.15.norm', } _snake_case : Optional[int] = { 'decoder.model.0.conv.conv': 'decoder.layers.0.conv', 'decoder.model.1.lstm': 'decoder.layers.1.lstm', 'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv', 'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv', 'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv', 'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv', 'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv', 'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv', 'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv', 'decoder.model.7.shortcut.conv.conv': 
'decoder.layers.7.shortcut.conv', 'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv', 'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv', 'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv', 'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv', 'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv', 'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv', 'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv', 'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv', 'decoder.model.15.conv.conv': 'decoder.layers.15.conv', } _snake_case : int = { 'decoder.model.0.conv.norm': 'decoder.layers.0.norm', 'decoder.model.3.convtr.norm': 'decoder.layers.3.norm', 'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm', 'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm', 'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm', 'decoder.model.6.convtr.norm': 'decoder.layers.6.norm', 'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm', 'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm', 'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm', 'decoder.model.9.convtr.norm': 'decoder.layers.9.norm', 'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm', 'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm', 'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm', 'decoder.model.12.convtr.norm': 'decoder.layers.12.norm', 'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm', 'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm', 'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm', 'decoder.model.15.conv.norm': 'decoder.layers.15.norm', } _snake_case : List[Any] = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } _snake_case : int = { **MAPPING_QUANTIZER, 
**MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } _snake_case : Dict = [] _snake_case : str = [] def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): for attribute in key.split("." ): A = getattr(UpperCamelCase , UpperCamelCase ) if weight_type is not None: A = getattr(UpperCamelCase , UpperCamelCase ).shape else: A = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": A = value elif weight_type == "weight_g": A = value elif weight_type == "weight_v": A = value elif weight_type == "bias": A = value elif weight_type == "running_mean": A = value elif weight_type == "running_var": A = value elif weight_type == "num_batches_tracked": A = value elif weight_type == "weight_ih_l0": A = value elif weight_type == "weight_hh_l0": A = value elif weight_type == "bias_ih_l0": A = value elif weight_type == "bias_hh_l0": A = value elif weight_type == "weight_ih_l1": A = value elif weight_type == "weight_hh_l1": A = value elif weight_type == "bias_ih_l1": A = value elif weight_type == "bias_hh_l1": A = value else: A = value logger.info(F"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." ) def A__ ( UpperCamelCase , UpperCamelCase ): for key in ignore_keys: if key.endswith(".*" ): if name.startswith(key[:-1] ): return True elif ".*." in key: A, A = key.split(".*." 
) if prefix in name and suffix in name: return True elif key in name: return True return False def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): A = [] if model_name == "encodec_24khz" or "encodec_32khz": A = MAPPING_24K elif model_name == "encodec_48khz": A = MAPPING_48K else: raise ValueError(F"Unsupported model: {model_name}" ) for name, value in orig_dict.items(): if should_ignore(UpperCamelCase , UpperCamelCase ): logger.info(F"{name} was ignored" ) continue A = False for key, mapped_key in MAPPING.items(): if "*" in key: A, A = key.split(".*." ) if prefix in name and suffix in name: A = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith("embed" ) and name.endswith("embed_avg" ): continue A = True if "*" in mapped_key: A = name.split(UpperCamelCase )[0].split("." )[-2] A = mapped_key.replace("*" , UpperCamelCase ) if "weight_g" in name: A = "weight_g" elif "weight_v" in name: A = "weight_v" elif "weight_ih_l0" in name: A = "weight_ih_l0" elif "weight_hh_l0" in name: A = "weight_hh_l0" elif "bias_ih_l0" in name: A = "bias_ih_l0" elif "bias_hh_l0" in name: A = "bias_hh_l0" elif "weight_ih_l1" in name: A = "weight_ih_l1" elif "weight_hh_l1" in name: A = "weight_hh_l1" elif "bias_ih_l1" in name: A = "bias_ih_l1" elif "bias_hh_l1" in name: A = "bias_hh_l1" elif "bias" in name: A = "bias" elif "weight" in name: A = "weight" elif "running_mean" in name: A = "running_mean" elif "running_var" in name: A = "running_var" elif "num_batches_tracked" in name: A = "num_batches_tracked" else: A = None set_recursively(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) continue if not is_used: unused_weights.append(UpperCamelCase ) logger.warning(F"Unused weights: {unused_weights}" ) @torch.no_grad() def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , ): if config_path is not None: A = EncodecConfig.from_pretrained(UpperCamelCase ) 
else: A = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": A = [8, 5, 4, 4] A = [2.2] A = 64 A = 32_000 A = 2_048 A = False A = False A = False elif model_name == "encodec_48khz": A = [8, 5, 4, 2] A = [3.0, 6.0, 12.0, 24.0] A = 48_000 A = 2 A = False A = "time_group_norm" A = True A = 1.0 A = 0.01 else: raise ValueError(F"Unknown model name: {model_name}" ) A = EncodecModel(UpperCamelCase ) A = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(UpperCamelCase ) A = torch.load(UpperCamelCase ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights A = original_checkpoint["best_state"] recursively_load_weights(UpperCamelCase , UpperCamelCase , UpperCamelCase ) model.save_pretrained(UpperCamelCase ) if repo_id: print("Pushing to the hub..." ) feature_extractor.push_to_hub(UpperCamelCase ) model.push_to_hub(UpperCamelCase ) if __name__ == "__main__": _snake_case : Optional[Any] = argparse.ArgumentParser() parser.add_argument( '--model', default='encodec_24khz', type=str, help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.', ) parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.' ) parser.add_argument( '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.' 
) _snake_case : List[str] = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
292
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextModel,
    CLIPTokenizer,
    WhisperForConditionalGeneration,
    WhisperProcessor,
)

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging

_snake_case : Any = logging.get_logger(__name__)  # pylint: disable=invalid-name


class _UpperCAmelCase ( lowercase_ ):
    """Speech-to-image pipeline.

    Transcribes raw audio with a Whisper model, then uses the transcription as
    the text prompt for a classifier-free-guided Stable Diffusion denoising
    loop (CLIP text encoder -> UNet -> VAE decode).

    NOTE(review): identifiers in this file are machine-mangled — every
    parameter is `__UpperCamelCase` (repeated) and locals are `A`, while the
    bodies still reference the original names (`safety_checker`,
    `slice_size`, `prompt`, ...).  As written the code cannot execute; the
    comments below describe the evident intent.
    """

    def __init__( self :Dict , __UpperCamelCase :WhisperForConditionalGeneration , __UpperCamelCase :WhisperProcessor , __UpperCamelCase :AutoencoderKL , __UpperCamelCase :CLIPTextModel , __UpperCamelCase :CLIPTokenizer , __UpperCamelCase :UNetaDConditionModel , __UpperCamelCase :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __UpperCamelCase :StableDiffusionSafetyChecker , __UpperCamelCase :CLIPImageProcessor , ):
        """Register all sub-models with the DiffusionPipeline machinery.

        Warns loudly (but proceeds) when the safety checker is disabled.
        """
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=__UpperCamelCase ,
            speech_processor=__UpperCamelCase ,
            vae=__UpperCamelCase ,
            text_encoder=__UpperCamelCase ,
            tokenizer=__UpperCamelCase ,
            unet=__UpperCamelCase ,
            scheduler=__UpperCamelCase ,
            feature_extractor=__UpperCamelCase ,
        )

    def lowerCamelCase ( self :Any , __UpperCamelCase :Optional[Union[str, int]] = "auto" ):
        """Enable sliced attention; "auto" uses half the attention head count per slice."""
        if slice_size == "auto":
            A = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(__UpperCamelCase )

    def lowerCamelCase ( self :Tuple ):
        """Disable attention slicing (presumably by forwarding None — confirm)."""
        self.enable_attention_slicing(__UpperCamelCase )

    @torch.no_grad()
    def __call__( self :Optional[Any] , __UpperCamelCase :Any , __UpperCamelCase :Dict=1_60_00 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 50 , __UpperCamelCase :float = 7.5 , __UpperCamelCase :Optional[Union[str, List[str]]] = None , __UpperCamelCase :Optional[int] = 1 , __UpperCamelCase :float = 0.0 , __UpperCamelCase :Optional[torch.Generator] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , __UpperCamelCase :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase :int = 1 , **__UpperCamelCase :Dict , ):
        """Run the full audio -> transcription -> image generation loop.

        Returns a StableDiffusionPipelineOutput (or a bare image array when
        `return_dict` is False).
        """
        # 1) Transcribe the audio with Whisper; the decoded text becomes `prompt`.
        A = self.speech_processor.feature_extractor(
            __UpperCamelCase , return_tensors="pt" , sampling_rate=__UpperCamelCase ).input_features.to(self.device )
        A = self.speech_model.generate(__UpperCamelCase , max_length=48_00_00 )

        A = self.speech_processor.tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase , normalize=__UpperCamelCase )[
            0
        ]

        # 2) Validate inputs (prompt type, dimensions, callback cadence).
        if isinstance(__UpperCamelCase , __UpperCamelCase ):
            A = 1
        elif isinstance(__UpperCamelCase , __UpperCamelCase ):
            A = len(__UpperCamelCase )
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}" )

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(__UpperCamelCase )}."
            )

        # get prompt text embeddings
        A = self.tokenizer(
            __UpperCamelCase ,
            padding="max_length" ,
            max_length=self.tokenizer.model_max_length ,
            return_tensors="pt" ,
        )
        A = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            # CLIP truncation: warn about the dropped tail, then clip the ids.
            A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            A = text_input_ids[:, : self.tokenizer.model_max_length]
        A = self.text_encoder(text_input_ids.to(self.device ) )[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        A, A, A = text_embeddings.shape
        A = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
        A = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        A = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            A = 42
            if negative_prompt is None:
                A = [""] * batch_size
            elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !="
                    f" {type(__UpperCamelCase )}."
                )
            elif isinstance(__UpperCamelCase , __UpperCamelCase ):
                A = [negative_prompt]
            elif batch_size != len(__UpperCamelCase ):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                A = negative_prompt

            A = text_input_ids.shape[-1]
            A = self.tokenizer(
                __UpperCamelCase ,
                padding="max_length" ,
                max_length=__UpperCamelCase ,
                truncation=__UpperCamelCase ,
                return_tensors="pt" ,
            )
            A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            A = uncond_embeddings.shape[1]
            A = uncond_embeddings.repeat(1 , __UpperCamelCase , 1 )
            A = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            A = torch.cat([uncond_embeddings, text_embeddings] )

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        A = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        A = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="cpu" , dtype=__UpperCamelCase ).to(
                    self.device )
            else:
                A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
            A = latents.to(self.device )

        # set timesteps
        self.scheduler.set_timesteps(__UpperCamelCase )

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        A = self.scheduler.timesteps.to(self.device )

        # scale the initial noise by the standard deviation required by the scheduler
        A = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        A = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        A = {}
        if accepts_eta:
            A = eta

        for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
            # expand the latents if we are doing classifier free guidance
            A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            A = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )

            # predict the noise residual
            A = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample

            # perform guidance
            if do_classifier_free_guidance:
                A, A = noise_pred.chunk(2 )
                A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            A = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )

        # 3) Decode the final latents back to pixel space with the VAE.
        A = 1 / 0.18_215 * latents
        A = self.vae.decode(__UpperCamelCase ).sample

        A = (image / 2 + 0.5).clamp(0 , 1 )

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()

        if output_type == "pil":
            A = self.numpy_to_pil(__UpperCamelCase )

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
292
1
"""simple docstring"""
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING

_snake_case : Dict = logging.get_logger(__name__)

# Map from checkpoint name to its hosted config.json.
_snake_case : str = {
    'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}


class _UpperCAmelCase ( lowercase_ ):
    """Configuration class for the DETA object-detection model.

    Holds the backbone config plus all transformer, deformable-attention,
    matcher and loss-coefficient hyper-parameters.

    NOTE(review): identifiers are machine-mangled — every parameter is named
    `__UpperCamelCase` and every local is `A`, while the body references the
    original names (`backbone_config`, `num_queries`, ...); the code cannot
    run as written.
    """

    UpperCamelCase = '''deta'''
    UpperCamelCase = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }

    def __init__( self :Union[str, Any] , __UpperCamelCase :List[Any]=None , __UpperCamelCase :Dict=9_00 , __UpperCamelCase :Optional[Any]=20_48 , __UpperCamelCase :List[Any]=6 , __UpperCamelCase :Tuple=20_48 , __UpperCamelCase :str=8 , __UpperCamelCase :List[Any]=6 , __UpperCamelCase :Dict=10_24 , __UpperCamelCase :str=8 , __UpperCamelCase :Optional[int]=0.0 , __UpperCamelCase :Any=True , __UpperCamelCase :Optional[int]="relu" , __UpperCamelCase :Union[str, Any]=2_56 , __UpperCamelCase :Dict=0.1 , __UpperCamelCase :Any=0.0 , __UpperCamelCase :List[str]=0.0 , __UpperCamelCase :Union[str, Any]=0.02 , __UpperCamelCase :List[Any]=1.0 , __UpperCamelCase :Tuple=True , __UpperCamelCase :Union[str, Any]=False , __UpperCamelCase :Union[str, Any]="sine" , __UpperCamelCase :Union[str, Any]=5 , __UpperCamelCase :List[str]=4 , __UpperCamelCase :Optional[Any]=4 , __UpperCamelCase :Dict=True , __UpperCamelCase :Optional[Any]=3_00 , __UpperCamelCase :str=True , __UpperCamelCase :Tuple=True , __UpperCamelCase :str=1 , __UpperCamelCase :List[str]=5 , __UpperCamelCase :Tuple=2 , __UpperCamelCase :Optional[Any]=1 , __UpperCamelCase :int=1 , __UpperCamelCase :Optional[Any]=5 , __UpperCamelCase :str=2 , __UpperCamelCase :int=0.1 , __UpperCamelCase :int=0.25 , **__UpperCamelCase :List[str] , ):
        """Build the config; falls back to a default ResNet backbone when none is given."""
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            A = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
        else:
            # A dict backbone config is materialized into its concrete config class.
            if isinstance(__UpperCamelCase , __UpperCamelCase ):
                A = backbone_config.pop("model_type" )
                A = CONFIG_MAPPING[backbone_model_type]
                A = config_class.from_dict(__UpperCamelCase )

        A = backbone_config
        A = num_queries
        A = max_position_embeddings
        A = d_model
        A = encoder_ffn_dim
        A = encoder_layers
        A = encoder_attention_heads
        A = decoder_ffn_dim
        A = decoder_layers
        A = decoder_attention_heads
        A = dropout
        A = attention_dropout
        A = activation_dropout
        A = activation_function
        A = init_std
        A = init_xavier_std
        A = encoder_layerdrop
        A = auxiliary_loss
        A = position_embedding_type
        # deformable attributes
        A = num_feature_levels
        A = encoder_n_points
        A = decoder_n_points
        A = two_stage
        A = two_stage_num_proposals
        A = with_box_refine
        A = assign_first_stage
        # Box refinement is a prerequisite of the two-stage scheme.
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        A = class_cost
        A = bbox_cost
        A = giou_cost
        # Loss coefficients
        A = mask_loss_coefficient
        A = dice_loss_coefficient
        A = bbox_loss_coefficient
        A = giou_loss_coefficient
        A = eos_coefficient
        A = focal_alpha
        super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )

    @property
    def lowerCamelCase ( self :Optional[Any] ):
        """Alias: number of attention heads (mirrors `encoder_attention_heads`)."""
        return self.encoder_attention_heads

    @property
    def lowerCamelCase ( self :str ):
        """Alias: hidden size (mirrors `d_model`)."""
        return self.d_model

    def lowerCamelCase ( self :Dict ):
        """Serialize this config (including the nested backbone config) to a plain dict."""
        A = copy.deepcopy(self.__dict__ )
        A = self.backbone_config.to_dict()
        A = self.__class__.model_type
        return output
292
"""simple docstring""" _snake_case : Optional[int] = [ 'DownloadConfig', 'DownloadManager', 'DownloadMode', 'StreamingDownloadManager', ] from .download_config import DownloadConfig from .download_manager import DownloadManager, DownloadMode from .streaming_download_manager import StreamingDownloadManager
292
1
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging

_snake_case : List[Any] = logging.get_logger(__name__)

# Map from checkpoint name to its hosted config.json.
_snake_case : int = {
    'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
    # See all Marian models at https://huggingface.co/models?filter=marian
}


class _UpperCAmelCase ( lowercase_ ):
    """Configuration class for Marian seq2seq translation models.

    NOTE(review): identifiers in this file are machine-mangled — parameters
    are all `__UpperCamelCase` and locals are `A`, while bodies reference the
    original names (`vocab_size`, `common_inputs`, ...).  The code cannot run
    as written; comments describe the evident intent.
    """

    UpperCamelCase = '''marian'''
    UpperCamelCase = ['''past_key_values''']
    UpperCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__( self :int , __UpperCamelCase :Any=5_81_01 , __UpperCamelCase :int=None , __UpperCamelCase :Union[str, Any]=10_24 , __UpperCamelCase :Union[str, Any]=12 , __UpperCamelCase :str=40_96 , __UpperCamelCase :int=16 , __UpperCamelCase :int=12 , __UpperCamelCase :Optional[Any]=40_96 , __UpperCamelCase :Optional[Any]=16 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :str=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :Any="gelu" , __UpperCamelCase :Any=10_24 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Optional[Any]=0.0 , __UpperCamelCase :Union[str, Any]=0.0 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :List[str]=5_81_00 , __UpperCamelCase :str=False , __UpperCamelCase :Optional[int]=5_81_00 , __UpperCamelCase :List[Any]=0 , __UpperCamelCase :List[str]=0 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ):
        """Store all model hyper-parameters, then defer to PretrainedConfig."""
        A = vocab_size
        # Decoder may use its own vocabulary; defaults to the encoder's.
        A = decoder_vocab_size or vocab_size
        A = max_position_embeddings
        A = d_model
        A = encoder_ffn_dim
        A = encoder_layers
        A = encoder_attention_heads
        A = decoder_ffn_dim
        A = decoder_layers
        A = decoder_attention_heads
        A = dropout
        A = attention_dropout
        A = activation_dropout
        A = activation_function
        A = init_std
        A = encoder_layerdrop
        A = decoder_layerdrop
        A = use_cache
        A = encoder_layers
        A = scale_embedding  # scale factor will be sqrt(d_model) if True
        A = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=__UpperCamelCase ,
            eos_token_id=__UpperCamelCase ,
            is_encoder_decoder=__UpperCamelCase ,
            decoder_start_token_id=__UpperCamelCase ,
            forced_eos_token_id=__UpperCamelCase ,
            **__UpperCamelCase ,
        )


class _UpperCAmelCase ( lowercase_ ):
    """ONNX export configuration for Marian: declares the dynamic input/output
    axes and builds dummy inputs (including past key/values) for tracing."""

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def lowerCamelCase ( self :List[str] ):
        """Dynamic-axis description of the model inputs, per task and past usage."""
        if self.task in ["default", "seq2seq-lm"]:
            A = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                A = {0: "batch"}
                A = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                A = {0: "batch", 1: "decoder_sequence"}
                A = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(__UpperCamelCase , direction="inputs" )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            A = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                A, A = self.num_layers
                for i in range(__UpperCamelCase ):
                    A = {0: "batch", 2: "past_sequence + sequence"}
                    A = {0: "batch", 2: "past_sequence + sequence"}
        else:
            A = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def lowerCamelCase ( self :List[str] ):
        """Dynamic-axis description of the model outputs (adds present k/v axes)."""
        if self.task in ["default", "seq2seq-lm"]:
            A = super().outputs
        else:
            A = super(__UpperCamelCase , self ).outputs
            if self.use_past:
                A, A = self.num_layers
                for i in range(__UpperCamelCase ):
                    A = {0: "batch", 2: "past_sequence + sequence"}
                    A = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
        """Build dummy encoder+decoder inputs (and zeroed past k/v) for seq2seq export."""
        A = self._generate_dummy_inputs_for_encoder_and_decoder(
            __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )

        # Generate decoder inputs
        A = seq_length if not self.use_past else 1
        A = self._generate_dummy_inputs_for_encoder_and_decoder(
            __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
        A = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        A = dict(**__UpperCamelCase , **__UpperCamelCase )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
            A, A = common_inputs["input_ids"].shape
            A = common_inputs["decoder_input_ids"].shape[1]
            A, A = self.num_attention_heads
            A = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            A = decoder_seq_length + 3
            A = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            A = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase )] , dim=1 )

            A = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            A, A = self.num_layers
            A = min(__UpperCamelCase , __UpperCamelCase )
            A = max(__UpperCamelCase , __UpperCamelCase ) - min_num_layers
            A = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(__UpperCamelCase ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(__UpperCamelCase ),
                        torch.zeros(__UpperCamelCase ),
                        torch.zeros(__UpperCamelCase ),
                        torch.zeros(__UpperCamelCase ),
                    )
                )
            # TODO: test this.
            A = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(__UpperCamelCase , __UpperCamelCase ):
                common_inputs["past_key_values"].append((torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) )
        return common_inputs

    def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
        """Build dummy inputs (and zeroed past k/v) for causal-LM export."""
        A = self._generate_dummy_inputs_for_encoder_and_decoder(
            __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
            A, A = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            A = seqlen + 2
            A, A = self.num_layers
            A, A = self.num_attention_heads
            A = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            A = common_inputs["attention_mask"].dtype
            A = torch.cat(
                [common_inputs["attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase , dtype=__UpperCamelCase )] , dim=1 )
            A = [
                (torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) for _ in range(__UpperCamelCase )
            ]
        return common_inputs

    def lowerCamelCase ( self :Tuple , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
        """Tokenize a dummy text batch sized for ONNX (fixed dims when axis is dynamic)."""
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        A = compute_effective_axis_dimension(
            __UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        A = tokenizer.num_special_tokens_to_add(__UpperCamelCase )
        A = compute_effective_axis_dimension(
            __UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCamelCase )

        # Generate dummy inputs according to compute batch and sequence
        A = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
        A = dict(tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase ) )
        return common_inputs

    def lowerCamelCase ( self :List[Any] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
        """Dispatch dummy-input generation by task."""
        if self.task in ["default", "seq2seq-lm"]:
            A = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                __UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )

        else:
            A = self._generate_dummy_inputs_for_causal_lm(
                __UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )

        return common_inputs

    def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str] , __UpperCamelCase :str , __UpperCamelCase :str ):
        """Flatten past k/v tuples into named tensors, choosing the parent by task."""
        if self.task in ["default", "seq2seq-lm"]:
            A = super()._flatten_past_key_values_(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
        else:
            A = super(__UpperCamelCase , self )._flatten_past_key_values_(
                __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )

    @property
    def lowerCamelCase ( self :List[str] ):
        """Absolute tolerance used when validating the exported model."""
        return 1e-4
292
"""Convert a fairseq Speech2Text checkpoint into the 🤗 Transformers format."""
import argparse

import torch
from torch import nn

from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping entries that have no Transformers counterpart (in place)."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        # BUG FIX: pop with a default so absent keys do not raise.
        state_dict.pop(k, None)


def rename_keys(s_dict):
    """Rename fairseq parameter names to the Transformers scheme (in place).

    BUG FIX: the previous version popped matching entries without re-inserting
    them under the new name — i.e. the weights were deleted, not renamed.
    """
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer that shares its weight with *emb*.

    BUG FIX: previously the created layer and its weight assignment were lost
    to throwaway locals and `lin_layer` was unbound at the return.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Load a fairseq checkpoint, translate its weights and save a Transformers model.

    Raises ValueError when weights other than the (re-computed) positional
    embeddings fail to load.
    """
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"]
    state_dict = mam_aaa["model"]
    # Keep the output projection before it is stripped by remove_ignore_keys_.
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = SpeechaTextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = SpeechaTextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    # BUG FIX: the converted head was previously discarded; attach it to the model.
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
292
1