import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
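# Illustrative usage sketch, not part of the original script; the script file
# name and output folder below are hypothetical.
#
#   python convert_dit_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base
#
# The converted checkpoint can then be reloaded through the standard API:
#
#   from transformers import BeitForMaskedImageModeling
#   model = BeitForMaskedImageModeling.from_pretrained("./dit-base")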
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
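# Illustrative usage sketch for the coin-distribution routine above, not part
# of the original file: a three-node tree holding 3/0/0 coins needs exactly
# two moves so that every node ends up with one coin.
if __name__ == "__main__":
    example_tree = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(example_tree) == 2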
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    CLIPConfig,
    CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_image_processing import CustomImageProcessor  # noqa E402


class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
from manim import *


class Stage5(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []

        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            f"Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1))

        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_1))

        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_2 = MarkupText(
            f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}

        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7

                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]),
                        MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7),
                        MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )

                self.play(MoveToTarget(model_cpu_arr[i]))

            a = a_c
            a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(step_2),
            FadeOut(a_c, run_time=0.5),
        )

        step_3 = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_3.move_to([2, 2, 0])

        self.play(Write(step_3, run_time=3), MoveToTarget(input))

        self.wait()
import unittest

import numpy as np


def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
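# Quick numerical sketch (not part of the original file) of the determinant
# identity det(X) = det(A) * det(S) that the test above relies on; the
# matrices are illustrative only.
if __name__ == "__main__":
    import numpy as np

    a = np.array([[4.0, 1.0], [1.0, 3.0]])
    b = np.array([[1.0], [0.0]])
    c = np.array([[2.0]])
    s = schur_complement(a, b, c)
    x = np.block([[a, b], [b.T, c]])
    print(np.isclose(np.linalg.det(x), np.linalg.det(a) * np.linalg.det(s)))  # True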
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}


class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
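# Minimal usage sketch, not part of the original module; assumes the
# `transformers` package is installed and the sizes are illustrative only.
from transformers import DistilBertConfig

config = DistilBertConfig(n_layers=4, n_heads=8, dim=256, hidden_dim=1024)
print(config.num_hidden_layers)  # 4 -- resolved through attribute_map to n_layers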
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
from typing import Dict, Iterable, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
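# Illustrative usage sketch, not part of the original module; assumes
# `transformers` and `Pillow` are installed and that `cats.png` exists
# locally (the file name is hypothetical).
from PIL import Image
from transformers import LevitImageProcessor

processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
image = Image.open("cats.png")
batch = processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])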
import argparse
import glob
import logging
import os
import time
from argparse import Namespace

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset

from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors


logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        # We test on the dev set to compare to benchmarks without submitting to the GLUE server
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )

        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        timestamp = time.strftime("%Y%m%d_%H%M%S")
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{timestamp}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


if __name__ == "__main__":
    main()
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k is an empirically determined constant in [0.04, 0.06]
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04  # note: shadows self.k with a hardcoded value
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()

                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        # Initialize with a list of the number of items in each set,
        # rank 1 for every set, and each set as its own parent.
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        # Merge two sets with the union-by-rank heuristic; return False
        # if src and dst already belong to the same set.
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        # Find the representative of a set, compressing the path as we go.
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
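# Illustrative usage sketch, not part of the original file: three sets of
# sizes 1, 2 and 3; merging sets 0 and 2 yields a largest set of size 4.
if __name__ == "__main__":
    ds = DisjointSet([1, 2, 3])
    ds.merge(0, 2)
    print(ds.max_set)  # 4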
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
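# Illustrative usage sketch, not part of the original module; assumes the
# `transformers` package is installed (downloads the Blenderbot vocab files).
from transformers import BlenderbotTokenizer

tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
ids = tokenizer(" Hello, how are you?").input_ids
print(tokenizer.decode(ids))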
import math from numpy import inf from scipy.integrate import quad def _SCREAMING_SNAKE_CASE ( lowercase : float ): '''simple docstring''' if num <= 0: raise ValueError('math domain error' ) return quad(lowercase , 0 , lowercase , args=(lowercase) )[0] def _SCREAMING_SNAKE_CASE ( lowercase : float , lowercase : float ): '''simple docstring''' return math.pow(lowercase , z - 1 ) * math.exp(-x ) if __name__ == "__main__": from doctest import testmod testmod()
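# --- Illustrative check (added for clarity, not part of the original file). For positive
# integers the integral above satisfies Gamma(n) = (n - 1)!, which gives a quick sanity
# test. A self-contained sketch under that assumption:
import math as _math
from numpy import inf as _inf
from scipy.integrate import quad as _quad

def _gamma_check(num: float) -> float:
    # Same integral as the module above: Gamma(num) = integral_0^inf x^(num-1) * e^(-x) dx
    return _quad(lambda x: _math.pow(x, num - 1) * _math.exp(-x), 0, _inf)[0]

# _math.isclose(_gamma_check(5), _math.factorial(4))  ->  True  (both are 24.0)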
lowerCamelCase : Dict = "Alexander Joslin" import operator as op from .stack import Stack def _SCREAMING_SNAKE_CASE ( lowercase : str ): '''simple docstring''' lowerCamelCase_ = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub} lowerCamelCase_ = Stack() lowerCamelCase_ = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(lowercase ) ) elif i in operators: # RULE 2 operator_stack.push(lowercase ) elif i == ")": # RULE 4 lowerCamelCase_ = operator_stack.peek() operator_stack.pop() lowerCamelCase_ = operand_stack.peek() operand_stack.pop() lowerCamelCase_ = operand_stack.peek() operand_stack.pop() lowerCamelCase_ = operators[opr](lowercase , lowercase ) operand_stack.push(lowercase ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": lowerCamelCase : Any = "(5 + ((4 * 2) * (2 + 3)))" # answer = 45 print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
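# --- Illustrative trace (added for clarity, not part of the original file) of the
# two-stack evaluation above on the input "(7 - 3)". Note that '(' and spaces match no
# rule and are skipped, operands are single digits, and the right operand is popped
# first so operator ordering is preserved:
#   '7' -> operand_stack: [7]
#   '-' -> operator_stack: ['-']
#   '3' -> operand_stack: [7, 3]
#   ')' -> pop '-'; pop 3 (right operand) and 7 (left operand); push 7 - 3 = 4
# Final result: operand_stack.peek() == 4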
from __future__ import annotations import requests def _SCREAMING_SNAKE_CASE ( lowercase : str ): '''simple docstring''' lowerCamelCase_ = f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty""" return requests.get(lowercase ).json() def _SCREAMING_SNAKE_CASE ( lowercase : int = 10 ): '''simple docstring''' lowerCamelCase_ = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty' lowerCamelCase_ = requests.get(lowercase ).json()[:max_stories] return [get_hackernews_story(lowercase ) for story_id in story_ids] def _SCREAMING_SNAKE_CASE ( lowercase : int = 10 ): '''simple docstring''' lowerCamelCase_ = hackernews_top_stories(lowercase ) return "\n".join('* [{title}]({url})'.format(**lowercase ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
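# --- Illustrative example (added for clarity, not part of the original file): the
# markdown builder above renders each fetched story as one bullet per line, e.g.
# (titles and URLs below are hypothetical placeholders, not real API output):
#   * [Story title one](https://example.com/one)
#   * [Story title two](https://example.com/two)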
def _SCREAMING_SNAKE_CASE ( lowercase : list[int] , lowercase : list[int] ): '''simple docstring''' lowerCamelCase_ = len(lowercase ) print('The following activities are selected:' ) # The first activity is always selected lowerCamelCase_ = 0 print(lowercase , end=',' ) # Consider rest of the activities for j in range(lowercase ): # If this activity has start time greater than # or equal to the finish time of previously # selected activity, then select it if start[j] >= finish[i]: print(lowercase , end=',' ) lowerCamelCase_ = j if __name__ == "__main__": import doctest doctest.testmod() lowerCamelCase : Tuple = [1, 3, 0, 5, 8, 5] lowerCamelCase : int = [2, 4, 6, 7, 9, 9] print_max_activities(start, finish)
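# --- Illustrative note (added for clarity, not part of the original file): the greedy
# above is only correct when activities are pre-sorted by finish time, as the sample
# `finish` list is. A hedged sketch that also handles unsorted input by sorting first
# (assumes at least one activity):
def _select_activities(start: list[int], finish: list[int]) -> list[int]:
    # Returns the indices of a maximum-size set of mutually compatible activities.
    order = sorted(range(len(finish)), key=lambda idx: finish[idx])
    selected = [order[0]]  # the earliest-finishing activity is always safe to pick
    last_finish = finish[order[0]]
    for idx in order[1:]:
        if start[idx] >= last_finish:  # compatible with the last selected activity
            selected.append(idx)
            last_finish = finish[idx]
    return selected

# _select_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9])  ->  [0, 1, 3, 4]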
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin lowerCamelCase : int = False @skip_mps class A( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase = StableDiffusionAttendAndExcitePipeline UpperCamelCase = False UpperCamelCase = TEXT_TO_IMAGE_PARAMS UpperCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} ) UpperCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS UpperCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def a__ ( cls : Dict ) -> Optional[int]: """simple docstring""" super().setUpClass() torch.use_deterministic_algorithms(A_ ) @classmethod def a__ ( cls : Union[str, Any] ) -> Any: """simple docstring""" super().tearDownClass() torch.use_deterministic_algorithms(A_ ) def a__ ( self : int ) -> Dict: """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=A_ , ) lowerCamelCase_ = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=A_ , set_alpha_to_one=A_ , ) torch.manual_seed(0 ) lowerCamelCase_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) lowerCamelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , ) lowerCamelCase_ = CLIPTextModel(A_ ) lowerCamelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) lowerCamelCase_ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def a__ ( self : str , A_ : Any , A_ : Any=0 ) -> Tuple: """simple docstring""" if str(A_ ).startswith('mps' ): lowerCamelCase_ = torch.manual_seed(A_ ) else: lowerCamelCase_ = torch.Generator(device=A_ ).manual_seed(A_ ) lowerCamelCase_ = lowerCamelCase_ = { 'prompt': 'a cat and a frog', 'token_indices': [2, 5], 'generator': generator, 'num_inference_steps': 1, 'guidance_scale': 6.0, 'output_type': 'numpy', 'max_iter_to_alter': 2, 'thresholds': {0: 0.7}, } return inputs def a__ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = 'cpu' lowerCamelCase_ = self.get_dummy_components() lowerCamelCase_ = self.pipeline_class(**A_ ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) lowerCamelCase_ = self.get_dummy_inputs(A_ ) lowerCamelCase_ = pipe(**A_ ).images lowerCamelCase_ = 
image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 64, 64, 3) ) lowerCamelCase_ = np.array( [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496] ) lowerCamelCase_ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A_ , 1E-3 ) def a__ ( self : Dict ) -> Tuple: """simple docstring""" super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def a__ ( self : int ) -> int: """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def a__ ( self : Union[str, Any] ) -> str: """simple docstring""" self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 ) def a__ ( self : Optional[int] ) -> int: """simple docstring""" super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def a__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def a__ ( self : List[Any] ) -> List[Any]: """simple docstring""" super().test_save_load_local(expected_max_difference=5E-4 ) def a__ ( self : Tuple ) -> int: """simple docstring""" super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class A( unittest.TestCase ): '''simple docstring''' @classmethod def a__ ( cls : Tuple ) -> Tuple: """simple docstring""" super().setUpClass() torch.use_deterministic_algorithms(A_ ) @classmethod def a__ ( cls : int ) -> int: """simple docstring""" super().tearDownClass() torch.use_deterministic_algorithms(A_ ) def a__ ( self : Dict ) -> Dict: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self : str ) -> Dict: """simple docstring""" lowerCamelCase_ = torch.manual_seed(51 ) lowerCamelCase_ = StableDiffusionAttendAndExcitePipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , safety_checker=A_ , torch_dtype=torch.floataa ) pipe.to('cuda' ) lowerCamelCase_ = 'a painting of an elephant with glasses' lowerCamelCase_ = [5, 7] lowerCamelCase_ = pipe( prompt=A_ , token_indices=A_ , guidance_scale=7.5 , generator=A_ , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0] lowerCamelCase_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' ) assert np.abs((expected_image - image).max() ) < 5E-1
import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A: '''simple docstring''' def __init__( self : Optional[Any] , A_ : Union[str, Any] , A_ : str=13 , A_ : List[Any]=32 , A_ : Tuple=2 , A_ : Dict=3 , A_ : Union[str, Any]=16 , A_ : List[str]=[32, 64, 128] , A_ : Optional[Any]=[1, 2, 1] , A_ : Tuple=[2, 2, 4] , A_ : Dict=2 , A_ : Optional[Any]=2.0 , A_ : List[str]=True , A_ : Dict=0.0 , A_ : List[str]=0.0 , A_ : Optional[int]=0.1 , A_ : str="gelu" , A_ : Optional[Any]=False , A_ : Any=True , A_ : Optional[Any]=0.02 , A_ : Dict=1E-5 , A_ : int=True , A_ : Optional[int]=None , A_ : List[str]=True , A_ : Tuple=10 , A_ : Any=8 , A_ : Dict=["stage1", "stage2"] , A_ : Optional[Any]=[1, 2] , ) -> List[str]: """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = image_size lowerCamelCase_ = patch_size lowerCamelCase_ = num_channels lowerCamelCase_ = embed_dim lowerCamelCase_ = hidden_sizes lowerCamelCase_ = depths lowerCamelCase_ = num_heads lowerCamelCase_ = window_size lowerCamelCase_ = mlp_ratio lowerCamelCase_ = qkv_bias lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = drop_path_rate lowerCamelCase_ = hidden_act lowerCamelCase_ = use_absolute_embeddings lowerCamelCase_ = patch_norm lowerCamelCase_ = layer_norm_eps lowerCamelCase_ = initializer_range lowerCamelCase_ = is_training lowerCamelCase_ = scope lowerCamelCase_ = use_labels lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = encoder_stride lowerCamelCase_ = out_features lowerCamelCase_ = out_indices def a__ ( self : List[str] ) -> List[Any]: """simple docstring""" lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = self.get_config() return config, pixel_values, labels def a__ ( self : List[Any] ) -> Any: """simple docstring""" return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , 
out_features=self.out_features , out_indices=self.out_indices , ) def a__ ( self : Union[str, Any] , A_ : Dict , A_ : int , A_ : Optional[int] ) -> List[str]: """simple docstring""" lowerCamelCase_ = FocalNetModel(config=A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ ) lowerCamelCase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCamelCase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def a__ ( self : Tuple , A_ : List[str] , A_ : Optional[int] , A_ : Optional[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ = FocalNetBackbone(config=A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None lowerCamelCase_ = None lowerCamelCase_ = FocalNetBackbone(config=A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def a__ ( self : int , A_ : Optional[Any] , A_ : Optional[int] , A_ : Any ) -> Any: """simple docstring""" lowerCamelCase_ = FocalNetForMaskedImageModeling(config=A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCamelCase_ = 1 lowerCamelCase_ = FocalNetForMaskedImageModeling(A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase_ = model(A_ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def a__ ( self : Tuple , A_ : List[Any] , A_ : int , A_ : Dict ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = self.type_sequence_label_size lowerCamelCase_ = FocalNetForImageClassification(A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase_ = 1 lowerCamelCase_ = FocalNetForImageClassification(A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase_ = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def a__ ( self : int ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs lowerCamelCase_ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, 
FocalNetBackbone, ) if is_torch_available() else () ) UpperCamelCase = ( {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification} if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def a__ ( self : List[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ = FocalNetModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=A_ , embed_dim=37 , has_text_modality=A_ ) def a__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a__ ( self : Any ) -> Optional[int]: """simple docstring""" return def a__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def a__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*A_ ) def a__ ( self : Dict ) -> int: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*A_ ) def a__ ( self : List[str] ) -> Any: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A_ ) @unittest.skip(reason='FocalNet does not use inputs_embeds' ) def a__ ( self : int ) -> int: """simple docstring""" pass @unittest.skip(reason='FocalNet does not use feedforward chunking' ) def a__ ( self : Tuple ) -> List[str]: """simple docstring""" pass def a__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowerCamelCase_ = model_class(A_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A_ , nn.Linear ) ) def a__ ( self : Any ) -> Optional[int]: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowerCamelCase_ = model_class(A_ ) lowerCamelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ = [*signature.parameters.keys()] lowerCamelCase_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , A_ ) def a__ ( self : int , A_ : List[Any] , A_ : int , A_ : Dict , A_ : Dict ) -> List[Any]: """simple docstring""" lowerCamelCase_ = model_class(A_ ) model.to(A_ ) model.eval() with torch.no_grad(): lowerCamelCase_ = model(**self._prepare_for_class(A_ , A_ ) ) lowerCamelCase_ = outputs.hidden_states lowerCamelCase_ = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(A_ ) , A_ ) # FocalNet has a different seq_length lowerCamelCase_ = ( config.patch_size if isinstance(config.patch_size , 
collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) lowerCamelCase_ = outputs.reshaped_hidden_states self.assertEqual(len(A_ ) , A_ ) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = reshaped_hidden_states[0].shape lowerCamelCase_ = ( reshaped_hidden_states[0].view(A_ , A_ , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def a__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: lowerCamelCase_ = True self.check_hidden_states_output(A_ , A_ , A_ , A_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ = True self.check_hidden_states_output(A_ , A_ , A_ , A_ ) def a__ ( self : List[str] ) -> Dict: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = 3 lowerCamelCase_ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCamelCase_ = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCamelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: lowerCamelCase_ = True self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ = True self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) ) @slow def a__ ( self : str ) -> Optional[Any]: """simple docstring""" for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = FocalNetModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def a__ ( self : List[Any] ) -> Tuple: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = _config_zero_init(A_ ) for model_class in self.all_model_classes: lowerCamelCase_ = model_class(config=A_ ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @require_vision @require_torch class A( unittest.TestCase ): '''simple docstring''' @cached_property def a__ ( self : List[str] ) -> Optional[int]: """simple docstring""" return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None @slow def a__ ( self : Tuple ) -> Any: """simple docstring""" lowerCamelCase_ = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(A_ ) lowerCamelCase_ = 
self.default_image_processor lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) lowerCamelCase_ = image_processor(images=A_ , return_tensors='pt' ).to(A_ ) # forward pass with torch.no_grad(): lowerCamelCase_ = model(**A_ ) # verify the logits lowerCamelCase_ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , A_ ) lowerCamelCase_ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(A_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) ) self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 ) @require_torch class A( UpperCamelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase = (FocalNetBackbone,) if is_torch_available() else () UpperCamelCase = FocalNetConfig UpperCamelCase = False def a__ ( self : List[str] ) -> Tuple: """simple docstring""" lowerCamelCase_ = FocalNetModelTester(self )
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase : Dict = { "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"], "tokenization_deberta": ["DebertaTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Dict = ["DebertaTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : List[Any] = [ "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "DebertaForMaskedLM", "DebertaForQuestionAnswering", "DebertaForSequenceClassification", "DebertaForTokenClassification", "DebertaModel", "DebertaPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : int = [ "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDebertaForMaskedLM", "TFDebertaForQuestionAnswering", "TFDebertaForSequenceClassification", "TFDebertaForTokenClassification", "TFDebertaModel", "TFDebertaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
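# --- Illustrative sketch (added for clarity, not part of the original file): the
# `_LazyModule` used above defers the heavy submodule imports until an attribute is
# first accessed. A minimal, hypothetical stand-in for the pattern (the real
# transformers implementation additionally handles __dir__, pickling, and module specs):
import importlib as _importlib
import types as _types

class _MiniLazyModule(_types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported attribute to the submodule that actually defines it.
        self._attr_to_module = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # Import the owning submodule only on first access, then cache the result.
        module = _importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)
        return value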
import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class A( unittest.TestCase ): '''simple docstring''' UpperCamelCase = MODEL_FOR_CAUSAL_LM_MAPPING UpperCamelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' ) # Using `do_sample=False` to force deterministic output lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ ) self.assertEqual( A_ , [ { 'generated_text': ( 'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.' ' oscope. FiliFili@@' ) } ] , ) lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] ) self.assertEqual( A_ , [ [ { 'generated_text': ( 'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.' ' oscope. FiliFili@@' ) } ], [ { 'generated_text': ( 'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy' ' oscope. oscope. FiliFili@@' ) } ], ] , ) lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ , num_return_sequences=2 , return_tensors=A_ ) self.assertEqual( A_ , [ {'generated_token_ids': ANY(A_ )}, {'generated_token_ids': ANY(A_ )}, ] , ) lowerCamelCase_ = text_generator.model.config.eos_token_id lowerCamelCase_ = '<pad>' lowerCamelCase_ = text_generator( ['This is a test', 'This is a second test'] , do_sample=A_ , num_return_sequences=2 , batch_size=2 , return_tensors=A_ , ) self.assertEqual( A_ , [ [ {'generated_token_ids': ANY(A_ )}, {'generated_token_ids': ANY(A_ )}, ], [ {'generated_token_ids': ANY(A_ )}, {'generated_token_ids': ANY(A_ )}, ], ] , ) @require_tf def a__ ( self : Optional[int] ) -> str: """simple docstring""" lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' ) # Using `do_sample=False` to force deterministic output lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ ) self.assertEqual( A_ , [ { 'generated_text': ( 'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵' ' please,' ) } ] , ) lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] , do_sample=A_ ) self.assertEqual( A_ , [ [ { 'generated_text': ( 'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵' ' please,' ) } ], [ { 'generated_text': ( 'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes' ' Cannes 閲閲Cannes Cannes Cannes 攵 please,' ) } ], ] , ) def a__ ( self : Optional[int] , A_ : Dict , A_ : int , A_ : List[str] ) -> str: """simple docstring""" lowerCamelCase_ = TextGenerationPipeline(model=A_ , tokenizer=A_ ) return text_generator, ["This is a test", "Another test"] def a__ ( self : Dict ) -> str: """simple docstring""" lowerCamelCase_ = 'Hello I believe in' lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' ) lowerCamelCase_ = text_generator(A_ ) self.assertEqual( A_ , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , ) lowerCamelCase_ = text_generator(A_ , stop_sequence=' fe' ) 
self.assertEqual(A_ , [{'generated_text': 'Hello I believe in fe'}] ) def a__ ( self : Any , A_ : Optional[Any] , A_ : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = text_generator.model lowerCamelCase_ = text_generator.tokenizer lowerCamelCase_ = text_generator('This is a test' ) self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] ) self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) ) lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ ) self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] ) self.assertNotIn('This is a test' , outputs[0]['generated_text'] ) lowerCamelCase_ = pipeline(task='text-generation' , model=A_ , tokenizer=A_ , return_full_text=A_ ) lowerCamelCase_ = text_generator('This is a test' ) self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] ) self.assertNotIn('This is a test' , outputs[0]['generated_text'] ) lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ ) self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] ) self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) ) lowerCamelCase_ = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=A_ ) self.assertEqual( A_ , [ [{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}], [{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}], ] , ) if text_generator.tokenizer.pad_token is not None: lowerCamelCase_ = text_generator( ['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=A_ ) self.assertEqual( A_ , [ [{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}], [{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}], ] , ) with self.assertRaises(A_ ): lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_text=A_ ) with self.assertRaises(A_ ): lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_tensors=A_ ) with self.assertRaises(A_ ): lowerCamelCase_ = text_generator('test' , return_text=A_ , return_tensors=A_ ) # Empty prompt is slightly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): lowerCamelCase_ = text_generator('' ) self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] ) else: with self.assertRaises((ValueError, AssertionError) ): lowerCamelCase_ = text_generator('' ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
lowerCamelCase_ = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM'] if ( tokenizer.model_max_length < 10000 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator('This is a test' * 500 , max_new_tokens=20 ) lowerCamelCase_ = text_generator('This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(A_ ): text_generator( 'This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def a__ ( self : Union[str, Any] ) -> Any: """simple docstring""" import torch # Classic `model_kwargs` lowerCamelCase_ = pipeline( model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCamelCase_ = pipe('This is a test' ) self.assertEqual( A_ , [ { 'generated_text': ( 'This is a test test test test test test test test test test test test test test test test' ' test' ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCamelCase_ = pipe('This is a test' ) self.assertEqual( A_ , [ { 'generated_text': ( 'This is a test test test test test test test test test test test test test test test test' ' test' ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) lowerCamelCase_ = pipe('This is a test' ) self.assertEqual( A_ , [ { 'generated_text': ( 'This is a test test test test test test test test test test test test test test test test' ' test' ) } ] , ) @require_torch @require_torch_gpu def a__ ( self : int ) -> str: """simple docstring""" import torch lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.floataa ) pipe('This is a test' ) @require_torch @require_accelerate @require_torch_gpu def a__ ( self : List[Any] ) -> Dict: """simple docstring""" import torch lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.floataa ) pipe('This is a test' , do_sample=A_ , top_p=0.5 ) def a__ ( self : Tuple ) -> Dict: """simple docstring""" lowerCamelCase_ = 'Hello world' lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' ) if text_generator.model.framework == "tf": lowerCamelCase_ = logging.get_logger('transformers.generation.tf_utils' ) else: lowerCamelCase_ = logging.get_logger('transformers.generation.utils' ) lowerCamelCase_ = 'Both `max_new_tokens`' # The beginning of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(A_ ) as cl: lowerCamelCase_ = text_generator(A_ ,
max_length=10 , max_new_tokens=1 ) self.assertIn(A_ , cl.out ) # The user only sets one -> no warning with CaptureLogger(A_ ) as cl: lowerCamelCase_ = text_generator(A_ , max_new_tokens=1 ) self.assertNotIn(A_ , cl.out ) with CaptureLogger(A_ ) as cl: lowerCamelCase_ = text_generator(A_ , max_length=10 ) self.assertNotIn(A_ , cl.out )
import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class A: '''simple docstring''' def __init__( self : Optional[int] , A_ : List[str] , A_ : Optional[int]=3 , A_ : List[str]=7 , A_ : Dict=True , A_ : Tuple=True , A_ : List[Any]=False , A_ : Tuple=True , A_ : Union[str, Any]=99 , A_ : List[str]=32 , A_ : List[str]=5 , A_ : Tuple=4 , A_ : Union[str, Any]=37 , A_ : Optional[int]="gelu" , A_ : str=0.1 , A_ : List[str]=0.1 , A_ : Tuple=512 , A_ : Any=16 , A_ : int=2 , A_ : List[Any]=0.02 , A_ : int=3 , A_ : int=4 , A_ : List[str]=None , ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = seq_length lowerCamelCase_ = is_training lowerCamelCase_ = use_input_mask lowerCamelCase_ = use_token_type_ids lowerCamelCase_ = use_labels lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = type_vocab_size lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = num_labels lowerCamelCase_ = num_choices lowerCamelCase_ = scope def a__ ( self : List[str] ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase_ = None if self.use_input_mask: lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ = None lowerCamelCase_ = None lowerCamelCase_ = None lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=A_ , ) def a__ ( self : Optional[int] , A_ : str , A_ : str , A_ : str , A_ : List[str] , A_ : Optional[int] , A_ : Union[str, Any] , A_ : int ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = FalconModel(config=A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ , 
attention_mask=A_ ) lowerCamelCase_ = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self : Any , A_ : Optional[int] , A_ : Any , A_ : List[str] , A_ : Union[str, Any] , A_ : Optional[Any] , A_ : Union[str, Any] , A_ : Optional[Any] , A_ : Optional[int] , A_ : Tuple , ) -> Any: """simple docstring""" lowerCamelCase_ = True lowerCamelCase_ = FalconModel(A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , ) lowerCamelCase_ = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , ) lowerCamelCase_ = model(A_ , attention_mask=A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self : Optional[int] , A_ : Dict , A_ : Any , A_ : Union[str, Any] , A_ : Optional[Any] , A_ : str , A_ : Optional[Any] , A_ : Dict , A_ : List[str] , A_ : Union[str, Any] , ) -> int: """simple docstring""" lowerCamelCase_ = FalconForCausalLM(config=A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ , attention_mask=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self : Tuple , A_ : str , A_ : Any , A_ : List[Any] , A_ : List[str] , A_ : Dict , A_ : str , A_ : Union[str, Any] , A_ : int , A_ : List[Any] , ) -> Tuple: """simple docstring""" lowerCamelCase_ = True lowerCamelCase_ = True lowerCamelCase_ = FalconForCausalLM(config=A_ ) model.to(A_ ) model.eval() # first forward pass lowerCamelCase_ = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , use_cache=A_ , ) lowerCamelCase_ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowerCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCamelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and lowerCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCamelCase_ = torch.cat([input_mask, next_mask] , dim=-1 ) lowerCamelCase_ = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , output_hidden_states=A_ , )['hidden_states'][0] lowerCamelCase_ = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , past_key_values=A_ , output_hidden_states=A_ , )['hidden_states'][0] # select random slice lowerCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCamelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach() lowerCamelCase_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1E-3 ) ) def a__ ( self : Optional[Any] ) -> Tuple: """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ) = config_and_inputs lowerCamelCase_ = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if 
is_torch_available() else () ) UpperCamelCase = (FalconForCausalLM,) if is_torch_available() else () UpperCamelCase = ( { '''feature-extraction''': FalconModel, '''text-classification''': FalconForSequenceClassification, '''text-generation''': FalconForCausalLM, '''question-answering''': FalconForQuestionAnswering, '''token-classification''': FalconForTokenClassification, '''zero-shot''': FalconForSequenceClassification, } if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False def a__ ( self : List[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ = FalconModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=A_ , hidden_size=37 ) def a__ ( self : Any ) -> int: """simple docstring""" self.config_tester.run_common_tests() def a__ ( self : Optional[Any] ) -> Any: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def a__ ( self : List[str] ) -> List[Any]: """simple docstring""" lowerCamelCase_ , *lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: lowerCamelCase_ = alibi self.model_tester.create_and_check_model(A_ , *A_ ) def a__ ( self : Dict ) -> List[str]: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = 3 lowerCamelCase_ = input_dict['input_ids'] lowerCamelCase_ = input_ids.ne(1 ).to(A_ ) lowerCamelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCamelCase_ = FalconForSequenceClassification(A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def a__ ( self : List[Any] ) -> int: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = 3 lowerCamelCase_ = 'single_label_classification' lowerCamelCase_ = input_dict['input_ids'] lowerCamelCase_ = input_ids.ne(1 ).to(A_ ) lowerCamelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCamelCase_ = FalconForSequenceClassification(A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def a__ ( self : List[str] ) -> List[str]: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = input_dict['input_ids'] lowerCamelCase_ = FalconForCausalLM(A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ , use_cache=A_ ) lowerCamelCase_ = input_ids.shape[0] lowerCamelCase_ = model._convert_to_rw_cache(result.past_key_values ) lowerCamelCase_ = model._convert_cache_to_standard_format(A_ , A_ ) for layer in range(len(A_ ) ): for tensor_idx in range(2 ): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 ) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 ) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) ) def a__ ( self : Dict ) -> str: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = 3 lowerCamelCase_ = 'multi_label_classification' lowerCamelCase_ = input_dict['input_ids'] lowerCamelCase_ = input_ids.ne(1 ).to(A_ 
) lowerCamelCase_ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) lowerCamelCase_ = FalconForSequenceClassification(A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def a__ ( self : str ) -> Dict: """simple docstring""" for model_class in self.all_generative_model_classes: lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(A_ , 'use_cache' ): return lowerCamelCase_ = model_class(A_ ).to(A_ ) if "use_cache" not in inputs: lowerCamelCase_ = True lowerCamelCase_ = model(**A_ ) # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format) if "past_key_values" not in outputs: return lowerCamelCase_ = ( getattr(A_ , 'decoder_layers' , A_ ) or getattr(A_ , 'num_decoder_layers' , A_ ) or config.num_hidden_layers ) lowerCamelCase_ = getattr(A_ , 'num_kv_heads' , config.num_attention_heads ) lowerCamelCase_ = getattr(A_ , 'd_model' , config.hidden_size ) lowerCamelCase_ = embed_dim // num_attention_heads lowerCamelCase_ = outputs['past_key_values'] self.assertEqual(len(A_ ) , A_ ) lowerCamelCase_ , lowerCamelCase_ = inputs['input_ids'].shape for i in range(A_ ): if config.new_decoder_architecture: lowerCamelCase_ = config.num_attention_heads elif config.multi_query: lowerCamelCase_ = 1 self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @require_torch class A( unittest.TestCase ): '''simple docstring''' @slow def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b' ) lowerCamelCase_ = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b' ) model.eval() model.to(A_ ) lowerCamelCase_ = tokenizer('My favorite food is' , return_tensors='pt' ).to(A_ ) lowerCamelCase_ = ( 'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.' 
) lowerCamelCase_ = model.generate(**A_ , do_sample=A_ , max_new_tokens=19 ) lowerCamelCase_ = tokenizer.batch_decode(A_ )[0] self.assertEqual(A_ , A_ ) @slow def a__ ( self : List[str] ) -> Optional[int]: """simple docstring""" for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: lowerCamelCase_ = AutoTokenizer.from_pretrained(A_ ) lowerCamelCase_ = FalconForCausalLM.from_pretrained(A_ ) model.eval() model.to(A_ ) lowerCamelCase_ = tokenizer('My favorite food is' , return_tensors='pt' ).to(A_ ) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**A_ , do_sample=A_ , max_new_tokens=4 ) model.generate(**A_ , do_sample=A_ , max_new_tokens=4 ) model.generate(**A_ , num_beams=2 , max_new_tokens=4 ) @slow def a__ ( self : str ) -> Optional[int]: """simple docstring""" with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: lowerCamelCase_ = AutoTokenizer.from_pretrained(A_ ) lowerCamelCase_ = FalconForCausalLM.from_pretrained(A_ ) model.eval() model.to(device=A_ ) lowerCamelCase_ = tokenizer('My favorite food is' , return_tensors='pt' ).to(A_ ) # Test results are the same with and without cache lowerCamelCase_ = model.generate(**A_ , do_sample=A_ , max_new_tokens=20 , use_cache=A_ ) lowerCamelCase_ = model.generate(**A_ , do_sample=A_ , max_new_tokens=20 , use_cache=A_ ) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
import os import re import shutil import sys import tempfile import unittest import black lowerCamelCase : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. lowerCamelCase : Tuple = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n" class A( unittest.TestCase ): '''simple docstring''' def a__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" lowerCamelCase_ = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) ) lowerCamelCase_ = self.diffusers_dir shutil.copy( os.path.join(A_ , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , ) def a__ ( self : Tuple ) -> List[str]: """simple docstring""" lowerCamelCase_ = 'src/diffusers' shutil.rmtree(self.diffusers_dir ) def a__ ( self : str , A_ : Optional[Any] , A_ : Optional[int] , A_ : str , A_ : Optional[Any]=None ) -> int: """simple docstring""" lowerCamelCase_ = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: lowerCamelCase_ = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result lowerCamelCase_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) lowerCamelCase_ = black.format_str(A_ , mode=A_ ) lowerCamelCase_ = os.path.join(self.diffusers_dir , 'new_code.py' ) with open(A_ , 'w' , newline='\n' ) as f: f.write(A_ ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(A_ ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=A_ ) with open(A_ , 'r' ) as f: self.assertTrue(f.read() , A_ ) def a__ ( self : Optional[int] ) -> Dict: """simple docstring""" lowerCamelCase_ = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' ) self.assertEqual(A_ , A_ ) def a__ ( self : Any ) -> Dict: """simple docstring""" self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , ) # With no empty line at the end self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , A_ , ) # Copy consistency with rename self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , A_ ) , ) # Copy consistency with a really long name lowerCamelCase_ = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason' self.check_copy_consistency( f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with 
DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('Bert' , A_ , A_ ) , ) # Copy consistency with overwrite self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , A_ , overwrite_result=re.sub('DDPM' , 'Test' , A_ ) , )
import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class A( unittest.TestCase ): '''simple docstring''' def a__ ( self : Optional[int] , A_ : Tuple , A_ : str , A_ : int ) -> Any: """simple docstring""" self.assertEqual(len(A_ ) , len(A_ ) ) for a, b in zip(A_ , A_ ): self.assertAlmostEqual(A_ , A_ , delta=A_ ) def a__ ( self : int ) -> str: """simple docstring""" lowerCamelCase_ = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(A_ ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 ) def a__ ( self : List[Any] ) -> List[Any]: """simple docstring""" lowerCamelCase_ = None ops.enable_eager_execution_internal() lowerCamelCase_ = tf.config.list_physical_devices('CPU' ) if len(A_ ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) lowerCamelCase_ = tf.config.list_logical_devices(device_type='CPU' ) lowerCamelCase_ = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): lowerCamelCase_ = GradientAccumulator() lowerCamelCase_ = tf.Variable([4.0, 3.0] ) lowerCamelCase_ , lowerCamelCase_ = create_optimizer(5E-5 , 10 , 5 ) lowerCamelCase_ = tf.Variable([0.0, 0.0] , trainable=A_ ) def accumulate_on_replica(A_ : Any ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(A_ : List[Any] , A_ : Tuple ): with strategy.scope(): lowerCamelCase_ = strategy.experimental_local_results(A_ ) local_variables[0].assign(A_ ) local_variables[1].assign(A_ ) strategy.run(A_ , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(A_ ) def _check_local_values(A_ : List[Any] , A_ : str ): lowerCamelCase_ = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , A_ , tol=1E-2 ) self.assertListAlmostEqual(values[1].value() , A_ , tol=1E-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
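# --- Illustrative sketch (added for clarity, not part of the original test file): the
# behaviour the first test above exercises — sum incoming gradients across calls, count
# steps, and reset back to zeros. A minimal framework-free stand-in (hypothetical; the
# real GradientAccumulator operates on lists of tf.Tensor and replica-local variables):
class _MiniAccumulator:
    def __init__(self):
        self.step = 0
        self.gradients = None  # one accumulated vector per trainable variable

    def __call__(self, grads):
        if self.gradients is None:
            self.gradients = [list(g) for g in grads]
        elif len(grads) != len(self.gradients):
            raise ValueError('Expected %d gradients, got %d' % (len(self.gradients), len(grads)))
        else:
            for acc, g in zip(self.gradients, grads):
                for i, value in enumerate(g):
                    acc[i] += value
        self.step += 1

    def reset(self):
        self.step = 0
        if self.gradients is not None:
            self.gradients = [[0.0] * len(g) for g in self.gradients]

# acc = _MiniAccumulator()
# acc([[1.0, 2.0]]); acc([[-2.0, 1.0]]); acc([[-1.0, 2.0]])
# acc.step -> 3 ; acc.gradients[0] -> [-2.0, 5.0]   (matching the assertions above)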
651
import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class A( unittest.TestCase ): '''simple docstring''' def a__ ( self : Optional[int] , A_ : Tuple , A_ : str , A_ : int ) -> Any: """simple docstring""" self.assertEqual(len(A_ ) , len(A_ ) ) for a, b in zip(A_ , A_ ): self.assertAlmostEqual(A_ , A_ , delta=A_ ) def a__ ( self : int ) -> str: """simple docstring""" lowerCamelCase_ = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(A_ ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 ) def a__ ( self : List[Any] ) -> List[Any]: """simple docstring""" lowerCamelCase_ = None ops.enable_eager_execution_internal() lowerCamelCase_ = tf.config.list_physical_devices('CPU' ) if len(A_ ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) lowerCamelCase_ = tf.config.list_logical_devices(device_type='CPU' ) lowerCamelCase_ = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): lowerCamelCase_ = GradientAccumulator() lowerCamelCase_ = tf.Variable([4.0, 3.0] ) lowerCamelCase_ , lowerCamelCase_ = create_optimizer(5E-5 , 10 , 5 ) lowerCamelCase_ = tf.Variable([0.0, 0.0] , trainable=A_ ) def accumulate_on_replica(A_ : Any ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(A_ : List[Any] , A_ : Tuple ): with strategy.scope(): lowerCamelCase_ = strategy.experimental_local_results(A_ ) local_variables[0].assign(A_ ) local_variables[1].assign(A_ ) strategy.run(A_ , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(A_ ) def _check_local_values(A_ : List[Any] , A_ : str ): lowerCamelCase_ = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , A_ , tol=1E-2 ) self.assertListAlmostEqual(values[1].value() , A_ , tol=1E-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
651
1
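A pure-Python sketch of the accumulator semantics the test above asserts (a running step count, elementwise gradient sums, and a reset); the class name and shape here are hypothetical, not the transformers API.

class TinyAccumulator:
    def __init__(self):
        self.step = 0
        self.gradients = None

    def __call__(self, grads):
        # Elementwise-accumulate one list of gradient vectors per call.
        if self.gradients is None:
            self.gradients = [list(g) for g in grads]
        else:
            if len(grads) != len(self.gradients):
                raise ValueError("inconsistent number of gradients")
            for acc, g in zip(self.gradients, grads):
                for i, v in enumerate(g):
                    acc[i] += v
        self.step += 1

    def reset(self):
        self.step = 0
        self.gradients = [[0.0] * len(g) for g in self.gradients]

acc = TinyAccumulator()
acc([[1.0, 2.0]]); acc([[-2.0, 1.0]]); acc([[-1.0, 2.0]])
assert acc.step == 3 and acc.gradients[0] == [-2.0, 5.0]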
import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class A: '''simple docstring''' @staticmethod def a__ ( *A_ : Any , **A_ : str ) -> Tuple: """simple docstring""" pass def _SCREAMING_SNAKE_CASE ( lowercase : Image ): '''simple docstring''' lowerCamelCase_ = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class A( unittest.TestCase ): '''simple docstring''' UpperCamelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def a__ ( self : Optional[int] , A_ : Union[str, Any] , A_ : Any , A_ : Any ) -> Tuple: """simple docstring""" lowerCamelCase_ = DepthEstimationPipeline(model=A_ , image_processor=A_ ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def a__ ( self : List[Any] , A_ : List[str] , A_ : List[Any] ) -> Tuple: """simple docstring""" lowerCamelCase_ = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' ) self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , A_ ) import datasets lowerCamelCase_ = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' ) lowerCamelCase_ = depth_estimator( [ Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ), 'http://images.cocodataset.org/val2017/000000039769.jpg', # RGBA dataset[0]['file'], # LA dataset[1]['file'], # L dataset[2]['file'], ] ) self.assertEqual( [ {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, ] , A_ , ) @require_tf @unittest.skip('Depth estimation is not implemented in TF' ) def a__ ( self : str ) -> Union[str, Any]: """simple docstring""" pass @slow @require_torch def a__ ( self : str ) -> Tuple: """simple docstring""" lowerCamelCase_ = 'Intel/dpt-large' lowerCamelCase_ = pipeline('depth-estimation' , model=A_ ) lowerCamelCase_ = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' ) lowerCamelCase_ = hashimage(outputs['depth'] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 ) @require_torch def a__ ( self : Dict ) -> List[str]: """simple docstring""" self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
651
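The slow test above boils down to this usage pattern; running it requires transformers with torch installed plus network access to download the Intel/dpt-large weights.

from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
print(outputs["predicted_depth"].shape)  # torch.Tensor of per-pixel depths
outputs["depth"].save("depth.png")       # PIL.Image visualisation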
import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs lowerCamelCase : str = imread(r"digital_image_processing/image_data/lena_small.jpg") lowerCamelCase : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY) def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = cn.convert_to_negative(lowercase ) # assert negative_img array for at least one True assert negative_img.any() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img: # Work around assertion for response assert str(cc.change_contrast(lowercase , 1_10 ) ).startswith( '<PIL.Image.Image image mode=RGB size=100x100 at' ) def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = imread('digital_image_processing/image_data/lena_small.jpg' , 0 ) # assert ambiguous array for all == True assert canny_img.all() lowerCamelCase_ = canny.canny(lowercase ) # assert canny array for at least one True assert canny_array.any() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' assert gg.gaussian_filter(lowercase , 5 , sigma=0.9 ).all() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) lowerCamelCase_ = conv.img_convolve(lowercase , lowercase ).astype(lowercase ) assert res.any() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' assert med.median_filter(lowercase , 3 ).any() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ , lowerCamelCase_ = sob.sobel_filter(lowercase ) assert grad.any() and theta.any() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = sp.make_sepia(lowercase , 20 ) assert sepia.all() def _SCREAMING_SNAKE_CASE ( lowercase : str = "digital_image_processing/image_data/lena_small.jpg" ): '''simple docstring''' lowerCamelCase_ = bs.Burkes(imread(lowercase , 1 ) , 1_20 ) burkes.process() assert burkes.output_img.any() def _SCREAMING_SNAKE_CASE ( lowercase : str = "digital_image_processing/image_data/lena_small.jpg" , ): '''simple docstring''' lowerCamelCase_ = rs.NearestNeighbour(imread(lowercase , 1 ) , 4_00 , 2_00 ) nn.process() assert nn.output.any() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = 'digital_image_processing/image_data/lena.jpg' # Reading the image and converting it to grayscale. 
lowerCamelCase_ = imread(lowercase , 0 ) # Test for get_neighbors_pixel function() return not None lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = image[x_coordinate][y_coordinate] lowerCamelCase_ = lbp.get_neighbors_pixel( lowercase , lowercase , lowercase , lowercase ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image lowerCamelCase_ = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): lowerCamelCase_ = lbp.local_binary_value(lowercase , lowercase , lowercase ) assert lbp_image.any()
651
1
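A self-contained sketch of the local-binary-pattern computation the last test drives; this reimplements the 3x3 comparison rather than calling the module under test, and the clockwise bit order is an assumption.

import numpy as np

def local_binary_value_sketch(image: np.ndarray, x: int, y: int) -> int:
    # Threshold the 8 neighbours of (x, y) against the centre pixel and
    # pack the resulting bits into one LBP code.
    center = image[x, y]
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1),
               (1, 1), (1, 0), (1, -1), (0, -1)]
    code = 0
    for bit, (dx, dy) in enumerate(offsets):
        nx, ny = x + dx, y + dy
        inside = 0 <= nx < image.shape[0] and 0 <= ny < image.shape[1]
        if inside and image[nx, ny] >= center:
            code |= 1 << bit
    return code

patch = np.array([[9, 1, 9], [1, 5, 1], [9, 1, 9]])
print(local_binary_value_sketch(patch, 1, 1))  # corners >= centre -> bits 0,2,4,6 -> 85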
def _SCREAMING_SNAKE_CASE ( lowercase : int ): '''simple docstring''' return lowercase & 1 == 0 if __name__ == "__main__": import doctest doctest.testmod()
651
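The bitwise parity test above works because the least-significant bit of a binary integer is set exactly for odd values; in Python it also holds for negatives, since & operates on the two's-complement value.

for n in (0, 7, 10, -4, -9):
    print(n, n & 1 == 0)  # True for 0, 10, -4; False for 7, -9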
class A: '''simple docstring''' def __init__( self : Dict ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = {} def a__ ( self : Union[str, Any] , A_ : List[Any] ) -> int: """simple docstring""" if vertex not in self.adjacency: lowerCamelCase_ = {} self.num_vertices += 1 def a__ ( self : int , A_ : int , A_ : Optional[Any] , A_ : List[str] ) -> Tuple: """simple docstring""" self.add_vertex(A_ ) self.add_vertex(A_ ) if head == tail: return lowerCamelCase_ = weight lowerCamelCase_ = weight def a__ ( self : Optional[int] ) -> List[str]: """simple docstring""" lowerCamelCase_ = self.get_edges() for edge in edges: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge edges.remove((tail, head, weight) ) for i in range(len(A_ ) ): lowerCamelCase_ = list(edges[i] ) edges.sort(key=lambda A_ : e[2] ) for i in range(len(A_ ) - 1 ): if edges[i][2] >= edges[i + 1][2]: lowerCamelCase_ = edges[i][2] + 1 for edge in edges: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge lowerCamelCase_ = weight lowerCamelCase_ = weight def __str__( self : str ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = '' for tail in self.adjacency: for head in self.adjacency[tail]: lowerCamelCase_ = self.adjacency[head][tail] string += f"""{head} -> {tail} == {weight}\n""" return string.rstrip('\n' ) def a__ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = [] for tail in self.adjacency: for head in self.adjacency[tail]: output.append((tail, head, self.adjacency[head][tail]) ) return output def a__ ( self : List[str] ) -> int: """simple docstring""" return self.adjacency.keys() @staticmethod def a__ ( A_ : Optional[Any]=None , A_ : List[str]=None ) -> List[str]: """simple docstring""" lowerCamelCase_ = Graph() if vertices is None: lowerCamelCase_ = [] if edges is None: lowerCamelCase_ = [] for vertex in vertices: g.add_vertex(A_ ) for edge in edges: g.add_edge(*A_ ) return g class A: '''simple docstring''' def __init__( self : Optional[int] ) -> int: """simple docstring""" lowerCamelCase_ = {} lowerCamelCase_ = {} def __len__( self : Any ) -> List[str]: """simple docstring""" return len(self.parent ) def a__ ( self : List[str] , A_ : Any ) -> Dict: """simple docstring""" if item in self.parent: return self.find(A_ ) lowerCamelCase_ = item lowerCamelCase_ = 0 return item def a__ ( self : List[str] , A_ : Tuple ) -> Optional[int]: """simple docstring""" if item not in self.parent: return self.make_set(A_ ) if item != self.parent[item]: lowerCamelCase_ = self.find(self.parent[item] ) return self.parent[item] def a__ ( self : Any , A_ : int , A_ : Tuple ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = self.find(A_ ) lowerCamelCase_ = self.find(A_ ) if roota == roota: return roota if self.rank[roota] > self.rank[roota]: lowerCamelCase_ = roota return roota if self.rank[roota] < self.rank[roota]: lowerCamelCase_ = roota return roota if self.rank[roota] == self.rank[roota]: self.rank[roota] += 1 lowerCamelCase_ = roota return roota return None @staticmethod def a__ ( A_ : int ) -> Tuple: """simple docstring""" lowerCamelCase_ = graph.num_vertices lowerCamelCase_ = Graph.UnionFind() lowerCamelCase_ = [] while num_components > 1: lowerCamelCase_ = {} for vertex in graph.get_vertices(): lowerCamelCase_ = -1 lowerCamelCase_ = graph.get_edges() for edge in edges: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge edges.remove((tail, head, weight) ) for edge in edges: lowerCamelCase_ , lowerCamelCase_ , 
lowerCamelCase_ = edge lowerCamelCase_ = union_find.find(A_ ) lowerCamelCase_ = union_find.find(A_ ) if seta != seta: if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: lowerCamelCase_ = [head, tail, weight] if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: lowerCamelCase_ = [head, tail, weight] for vertex in cheap_edge: if cheap_edge[vertex] != -1: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = cheap_edge[vertex] if union_find.find(A_ ) != union_find.find(A_ ): union_find.union(A_ , A_ ) mst_edges.append(cheap_edge[vertex] ) lowerCamelCase_ = num_components - 1 lowerCamelCase_ = Graph.build(edges=A_ ) return mst
651
1
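A compact, self-contained version of the Boruvka loop implemented above, on an explicit edge list with a union-find. A connected graph with distinct edge weights is assumed, so the cheapest-edge choice is unambiguous (the class above enforces distinctness by bumping duplicate weights).

def boruvka_mst(n: int, edges: list) -> list:
    parent = list(range(n))

    def find(x: int) -> int:
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    mst, components = [], n
    while components > 1:
        cheapest = [None] * n  # cheapest outgoing edge per component root
        for u, v, w in edges:
            ru, rv = find(u), find(v)
            if ru != rv:
                if cheapest[ru] is None or w < cheapest[ru][2]:
                    cheapest[ru] = (u, v, w)
                if cheapest[rv] is None or w < cheapest[rv][2]:
                    cheapest[rv] = (u, v, w)
        for edge in cheapest:
            if edge is not None:
                ru, rv = find(edge[0]), find(edge[1])
                if ru != rv:
                    parent[ru] = rv
                    mst.append(edge)
                    components -= 1
    return mst

print(boruvka_mst(4, [(0, 1, 1), (1, 2, 2), (2, 3, 3), (0, 3, 4)]))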
from typing import TYPE_CHECKING from ...utils import _LazyModule lowerCamelCase : Any = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]} if TYPE_CHECKING: from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM else: import sys lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
651
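The _LazyModule pattern above defers the heavy import until an attribute is first touched. A minimal module-level sketch using PEP 562's __getattr__; put this in a package __init__.py, and note the json example is just a stand-in submodule.

import importlib

_import_structure = {"json": ["dumps"]}  # submodule -> exported names

def __getattr__(name):
    # Called only when `name` is not found normally; imports on first use.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module has no attribute {name!r}")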
def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = 0 for i in range(1 , 10_01 ): lowerCamelCase_ += i**i return str(lowerCamelCase_ )[-10:] if __name__ == "__main__": print(_SCREAMING_SNAKE_CASE())
651
1
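Since only the last ten digits of the self-powers sum matter, the computation above can be done modulo 10**10 with three-argument pow, which keeps every intermediate value small; a sketch:

MOD = 10**10
total = sum(pow(i, i, MOD) for i in range(1, 1001)) % MOD
print(f"{total:010d}")  # zero-padded in case the leading digit is 0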
import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available from . import BaseDiffusersCLICommand def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] ): '''simple docstring''' return EnvironmentCommand() class A( UpperCamelCase ): '''simple docstring''' @staticmethod def a__ ( A_ : ArgumentParser ) -> str: """simple docstring""" lowerCamelCase_ = parser.add_parser('env' ) download_parser.set_defaults(func=A_ ) def a__ ( self : Optional[Any] ) -> Any: """simple docstring""" lowerCamelCase_ = huggingface_hub.__version__ lowerCamelCase_ = 'not installed' lowerCamelCase_ = 'NA' if is_torch_available(): import torch lowerCamelCase_ = torch.__version__ lowerCamelCase_ = torch.cuda.is_available() lowerCamelCase_ = 'not installed' if is_transformers_available(): import transformers lowerCamelCase_ = transformers.__version__ lowerCamelCase_ = 'not installed' if is_accelerate_available(): import accelerate lowerCamelCase_ = accelerate.__version__ lowerCamelCase_ = 'not installed' if is_xformers_available(): import xformers lowerCamelCase_ = xformers.__version__ lowerCamelCase_ = { '`diffusers` version': version, 'Platform': platform.platform(), 'Python version': platform.python_version(), 'PyTorch version (GPU?)': f"""{pt_version} ({pt_cuda_available})""", 'Huggingface_hub version': hub_version, 'Transformers version': transformers_version, 'Accelerate version': accelerate_version, 'xFormers version': xformers_version, 'Using GPU in script?': '<fill in>', 'Using distributed or parallel set-up in script?': '<fill in>', } print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' ) print(self.format_dict(A_ ) ) return info @staticmethod def a__ ( A_ : Dict ) -> Any: """simple docstring""" return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
651
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) lowerCamelCase : Dict = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Optional[int] = ["ViTFeatureExtractor"] lowerCamelCase : Dict = ["ViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : int = [ "VIT_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTForImageClassification", "ViTForMaskedImageModeling", "ViTModel", "ViTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Tuple = [ "TFViTForImageClassification", "TFViTModel", "TFViTPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Dict = [ "FlaxViTForImageClassification", "FlaxViTModel", "FlaxViTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys lowerCamelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
651
1
from __future__ import annotations def _SCREAMING_SNAKE_CASE ( lowercase : list[int] , lowercase : list[int] , lowercase : list[int] , lowercase : list[list[str]] , lowercase : int , ): '''simple docstring''' lowerCamelCase_ = len(lowercase ) # If row is equal to the size of the board it means there are a queen in each row in # the current board (possible_board) if row == n: # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] ) return # We iterate each column in the row to find all possible results in each row for col in range(lowercase ): # We apply that we learned previously. First we check that in the current board # (possible_board) there are not other same value because if there is it means # that there are a collision in vertical. Then we apply the two formulas we # learned before: # # 45º: y - x = b or 45: row - col = b # 135º: y + x = b or row + col = b. # # And we verify if the results of this two formulas not exist in their variables # respectively. (diagonal_right_collisions, diagonal_left_collisions) # # If any or these are True it means there is a collision so we continue to the # next value in the for loop. if ( col in possible_board or row - col in diagonal_right_collisions or row + col in diagonal_left_collisions ): continue # If it is False we call dfs function again and we update the inputs depth_first_search( [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , lowercase , lowercase , ) def _SCREAMING_SNAKE_CASE ( lowercase : int ): '''simple docstring''' lowerCamelCase_ = [] depth_first_search([] , [] , [] , lowercase , lowercase ) # Print all the boards for board in boards: for column in board: print(lowercase ) print('' ) print(len(lowercase ) , 'solutions were found.' ) if __name__ == "__main__": import doctest doctest.testmod() n_queens_solution(4)
651
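The two diagonal formulas used above can be checked in isolation: queens (r1, c1) and (r2, c2) share a diagonal iff r1 - c1 == r2 - c2 or r1 + c1 == r2 + c2. A tiny helper with hypothetical names:

def safe(board, row, col):
    # board[r] holds the column of the queen already placed in row r.
    return all(
        c != col and r - c != row - col and r + c != row + col
        for r, c in enumerate(board)
    )

print(safe([1, 3], 2, 0))  # True: no shared column or diagonal
print(safe([1, 3], 2, 2))  # False: (1, 3) and (2, 2) share an anti-diagonal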
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets lowerCamelCase : int = datasets.logging.get_logger(__name__) lowerCamelCase : Optional[Any] = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n" lowerCamelCase : Tuple = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n" lowerCamelCase : Optional[Any] = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n" def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Optional[int] , lowercase : Any=False , lowercase : Any=False , lowercase : Dict=True , lowercase : List[str]=False , lowercase : int="dummy_doc" ): '''simple docstring''' lowerCamelCase_ = {doc: key_lines} lowerCamelCase_ = {doc: sys_lines} lowerCamelCase_ = {} lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowercase , key_doc_lines[doc] , lowercase ) key_singletons_num += singletons_num if NP_only or min_span: lowerCamelCase_ = reader.set_annotated_parse_trees(lowercase , key_doc_lines[doc] , lowercase , lowercase ) lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowercase , sys_doc_lines[doc] , lowercase ) sys_singletons_num += singletons_num if NP_only or min_span: lowerCamelCase_ = reader.set_annotated_parse_trees(lowercase , key_doc_lines[doc] , lowercase , lowercase ) if remove_nested: lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowercase , lowercase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowercase , lowercase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters lowerCamelCase_ = reader.get_mention_assignments(lowercase , lowercase ) lowerCamelCase_ = reader.get_mention_assignments(lowercase , lowercase ) lowerCamelCase_ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( 'Number of removed nested coreferring mentions in the key ' f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" ) logger.info( 'Number of resulting singleton clusters in the key ' f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" ) if not keep_singletons: logger.info( f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """ 'files, respectively' ) return doc_coref_infos def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Tuple , 
lowercase : List[str] , lowercase : List[Any] , lowercase : List[Any] , lowercase : Tuple , lowercase : str ): '''simple docstring''' lowerCamelCase_ = get_coref_infos(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) lowerCamelCase_ = {} lowerCamelCase_ = 0 lowerCamelCase_ = 0 for name, metric in metrics: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = evaluator.evaluate_documents(lowercase , lowercase , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": fa} ) logger.info( name.ljust(10 ) , f"""Recall: {recall * 1_00:.2f}""" , f""" Precision: {precision * 1_00:.2f}""" , f""" F1: {fa * 1_00:.2f}""" , ) if conll_subparts_num == 3: lowerCamelCase_ = (conll / 3) * 1_00 logger.info(f"""CoNLL score: {conll:.2f}""" ) output_scores.update({'conll_score': conll} ) return output_scores def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] ): '''simple docstring''' lowerCamelCase_ = False for line in key_lines: if not line.startswith('#' ): if len(line.split() ) > 6: lowerCamelCase_ = line.split()[5] if not parse_col == "-": lowerCamelCase_ = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A( datasets.Metric ): '''simple docstring''' def a__ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' ) ), 'references': datasets.Sequence(datasets.Value('string' ) ), } ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[ 'https://github.com/ns-moosavi/coval', 'https://www.aclweb.org/anthology/P16-1060', 'http://www.conll.cemantix.org/2012/data.html', ] , ) def a__ ( self : List[str] , A_ : Optional[Any] , A_ : Optional[int] , A_ : int=True , A_ : str=False , A_ : int=False , A_ : Union[str, Any]=False ) -> List[Any]: """simple docstring""" lowerCamelCase_ = [ ('mentions', evaluator.mentions), ('muc', evaluator.muc), ('bcub', evaluator.b_cubed), ('ceafe', evaluator.ceafe), ('lea', evaluator.lea), ] if min_span: lowerCamelCase_ = util.check_gold_parse_annotation(A_ ) if not has_gold_parse: raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" lowerCamelCase_ = evaluate( key_lines=A_ , sys_lines=A_ , metrics=A_ , NP_only=A_ , remove_nested=A_ , keep_singletons=A_ , min_span=A_ , ) return score
651
1
from __future__ import annotations from fractions import Fraction def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : int ): '''simple docstring''' return ( num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den ) def _SCREAMING_SNAKE_CASE ( lowercase : int ): '''simple docstring''' lowerCamelCase_ = [] lowerCamelCase_ = 11 lowerCamelCase_ = int('1' + '0' * digit_len ) for num in range(lowercase , lowercase ): while den <= 99: if (num != den) and (num % 10 == den // 10) and (den % 10 != 0): if is_digit_cancelling(lowercase , lowercase ): solutions.append(f"""{num}/{den}""" ) den += 1 num += 1 lowerCamelCase_ = 10 return solutions def _SCREAMING_SNAKE_CASE ( lowercase : int = 2 ): '''simple docstring''' lowerCamelCase_ = 1.0 for fraction in fraction_list(lowercase ): lowerCamelCase_ = Fraction(lowercase ) result *= frac.denominator / frac.numerator return int(lowercase ) if __name__ == "__main__": print(solution())
651
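A worked instance of the predicate above: 49/98 "cancels" the shared 9 to 4/8, and the fraction's value survives the bogus cancellation.

from fractions import Fraction

num, den = 49, 98
assert num % 10 == den // 10                                # shared digit: 9
assert Fraction(num // 10, den % 10) == Fraction(num, den)  # 4/8 == 49/98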
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=UpperCamelCase ) class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) UpperCamelCase = Features({'''text''': Value('''string''' )} ) UpperCamelCase = Features({} ) UpperCamelCase = "text" @property def a__ ( self : List[Any] ) -> Dict[str, str]: """simple docstring""" return {self.text_column: "text"}
651
1
import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging lowerCamelCase : List[Any] = logging.get_logger(__name__) lowerCamelCase : Any = r"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n" class A( UpperCamelCase ): '''simple docstring''' @add_start_docstrings(A_ ) def __call__( self : Any , A_ : torch.LongTensor , A_ : torch.FloatTensor , **A_ : str ) -> bool: """simple docstring""" raise NotImplementedError('StoppingCriteria needs to be subclassed' ) class A( UpperCamelCase ): '''simple docstring''' def __init__( self : List[str] , A_ : int , A_ : Optional[int] = None ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = max_length lowerCamelCase_ = max_position_embeddings @add_start_docstrings(A_ ) def __call__( self : List[str] , A_ : torch.LongTensor , A_ : torch.FloatTensor , **A_ : Optional[int] ) -> bool: """simple docstring""" lowerCamelCase_ = input_ids.shape[-1] lowerCamelCase_ = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( 'This is a friendly reminder - the current text generation call will exceed the model\'s predefined ' f"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """ 'exceptions, performance degradation, or nothing at all.' ) return is_done class A( UpperCamelCase ): '''simple docstring''' def __init__( self : List[str] , A_ : int , A_ : int ) -> Optional[Any]: """simple docstring""" warnings.warn( 'The class `MaxNewTokensCriteria` is deprecated. ' f"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """ 'with `max_length = start_length + max_new_tokens` instead.' 
, A_ , ) lowerCamelCase_ = start_length lowerCamelCase_ = max_new_tokens lowerCamelCase_ = start_length + max_new_tokens @add_start_docstrings(A_ ) def __call__( self : Dict , A_ : torch.LongTensor , A_ : torch.FloatTensor , **A_ : Optional[int] ) -> bool: """simple docstring""" return input_ids.shape[-1] >= self.max_length class A( UpperCamelCase ): '''simple docstring''' def __init__( self : Dict , A_ : float , A_ : Optional[float] = None ) -> Any: """simple docstring""" lowerCamelCase_ = max_time lowerCamelCase_ = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(A_ ) def __call__( self : Union[str, Any] , A_ : torch.LongTensor , A_ : torch.FloatTensor , **A_ : Tuple ) -> bool: """simple docstring""" return time.time() - self.initial_timestamp > self.max_time class A( UpperCamelCase ): '''simple docstring''' @add_start_docstrings(A_ ) def __call__( self : Optional[int] , A_ : torch.LongTensor , A_ : torch.FloatTensor , **A_ : Optional[int] ) -> bool: """simple docstring""" return any(criteria(A_ , A_ ) for criteria in self ) @property def a__ ( self : str ) -> Optional[int]: """simple docstring""" for stopping_criterium in self: if isinstance(A_ , A_ ): return stopping_criterium.max_length elif isinstance(A_ , A_ ): return stopping_criterium.max_length return None def _SCREAMING_SNAKE_CASE ( lowercase : StoppingCriteriaList , lowercase : int ): '''simple docstring''' lowerCamelCase_ = stopping_criteria.max_length lowerCamelCase_ = deepcopy(lowercase ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn('You set different `max_length` for stopping criteria and `max_length` parameter' , lowercase ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=lowercase ) ) return new_stopping_criteria
651
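The contract every criterion above implements is just a callable (input_ids, scores, **kwargs) -> bool. A minimal custom criterion in that shape, stopping once a given token id has been generated; the class is hypothetical, not part of transformers.

import torch

class StopOnToken:
    def __init__(self, token_id: int):
        self.token_id = token_id

    def __call__(self, input_ids: torch.LongTensor, scores, **kwargs) -> bool:
        # Stop as soon as any sequence in the batch just emitted the token.
        return bool((input_ids[:, -1] == self.token_id).any())

criterion = StopOnToken(token_id=2)
print(criterion(torch.tensor([[5, 7, 2]]), None))  # True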
from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = '''new-model''' if is_tf_available(): class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = NewModelConfig @require_tf class A( unittest.TestCase ): '''simple docstring''' @slow def a__ ( self : str ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = 'bert-base-cased' lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : List[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ = 'bert-base-cased' lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForPreTraining.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : Union[str, Any] ) -> str: """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ ) lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ , output_loading_info=A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : List[Any] ) -> Dict: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : int ) -> str: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = 
AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ ) lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ , output_loading_info=A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : Any ) -> List[Any]: """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ ) lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ , output_loading_info=A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : Tuple ) -> str: """simple docstring""" for model_name in ["bert-base-uncased"]: lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForSequenceClassification.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : List[Any] ) -> Any: """simple docstring""" for model_name in ["bert-base-uncased"]: lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForQuestionAnswering.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow @require_tensorflow_probability def a__ ( self : int ) -> Union[str, Any]: """simple docstring""" for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(A_ ) lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained( A_ , output_loading_info=A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) def a__ ( self : int ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ ) self.assertIsInstance(A_ , A_ ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 ) def a__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ ) self.assertIsInstance(A_ , A_ ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 ) def a__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = copy.deepcopy(model.config ) lowerCamelCase_ = ['FunnelBaseModel'] lowerCamelCase_ = TFAutoModel.from_config(A_ ) self.assertIsInstance(A_ , A_ ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(A_ ) lowerCamelCase_ = TFAutoModel.from_pretrained(A_ ) self.assertIsInstance(A_ , A_ ) def a__ ( self : Any ) -> Tuple: """simple docstring""" try: AutoConfig.register('new-model' , A_ ) lowerCamelCase_ = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(A_ ): auto_class.register(A_ , A_ ) 
auto_class.register(A_ , A_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(A_ ): auto_class.register(A_ , A_ ) # Now that the config is registered, it can be used as any other config with the auto-API lowerCamelCase_ = BertModelTester(self ).get_config() lowerCamelCase_ = NewModelConfig(**tiny_config.to_dict() ) lowerCamelCase_ = auto_class.from_config(A_ ) self.assertIsInstance(A_ , A_ ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(A_ ) lowerCamelCase_ = auto_class.from_pretrained(A_ ) self.assertIsInstance(A_ , A_ ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def a__ ( self : int ) -> int: """simple docstring""" with self.assertRaisesRegex( A_ , 'bert-base is not a local folder and is not a valid model identifier' ): lowerCamelCase_ = TFAutoModel.from_pretrained('bert-base' ) def a__ ( self : Any ) -> Dict: """simple docstring""" with self.assertRaisesRegex( A_ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ): lowerCamelCase_ = TFAutoModel.from_pretrained(A_ , revision='aaaaaa' ) def a__ ( self : str ) -> int: """simple docstring""" with self.assertRaisesRegex( A_ , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ): lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' ) def a__ ( self : Any ) -> List[Any]: """simple docstring""" with self.assertRaisesRegex(A_ , 'Use `from_pt=True` to load this model' ): lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' ) def a__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' ) with RequestCounter() as counter: lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' ) with RequestCounter() as counter: lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
651
1
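The register/from_config round-trip tested above reduces to a mapping from config class to model class; a stripped-down sketch of that registry pattern, with hypothetical names.

_REGISTRY: dict = {}  # config class -> model class

def register(config_cls, model_cls):
    if config_cls in _REGISTRY:
        raise ValueError(f"{config_cls.__name__} already registered")
    _REGISTRY[config_cls] = model_cls

def from_config(config):
    return _REGISTRY[type(config)](config)

class NewConfig: ...
class NewModel:
    def __init__(self, config): self.config = config

register(NewConfig, NewModel)
assert isinstance(from_config(NewConfig()), NewModel)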
import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class A( unittest.TestCase ): '''simple docstring''' def a__ ( self : str ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = { 'task_specific_params': { 'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4}, 'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4}, 'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6}, } } lowerCamelCase_ = { 'task_specific_params.summarization.length_penalty': 1.0, 'task_specific_params.summarization.max_length': 128, 'task_specific_params.summarization.min_length': 12, 'task_specific_params.summarization.num_beams': 4, 'task_specific_params.summarization_cnn.length_penalty': 2.0, 'task_specific_params.summarization_cnn.max_length': 142, 'task_specific_params.summarization_cnn.min_length': 56, 'task_specific_params.summarization_cnn.num_beams': 4, 'task_specific_params.summarization_xsum.length_penalty': 1.0, 'task_specific_params.summarization_xsum.max_length': 62, 'task_specific_params.summarization_xsum.min_length': 11, 'task_specific_params.summarization_xsum.num_beams': 6, } self.assertEqual(flatten_dict(A_ ) , A_ ) def a__ ( self : Optional[int] ) -> List[str]: """simple docstring""" lowerCamelCase_ = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(A_ ) , x.transpose() ) ) lowerCamelCase_ = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(A_ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def a__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" lowerCamelCase_ = np.random.randn(3 , 4 ) lowerCamelCase_ = torch.tensor(A_ ) self.assertTrue(np.allclose(transpose(A_ ) , transpose(A_ ).numpy() ) ) lowerCamelCase_ = np.random.randn(3 , 4 , 5 ) lowerCamelCase_ = torch.tensor(A_ ) self.assertTrue(np.allclose(transpose(A_ , axes=(1, 2, 0) ) , transpose(A_ , axes=(1, 2, 0) ).numpy() ) ) @require_tf def a__ ( self : int ) -> List[Any]: """simple docstring""" lowerCamelCase_ = np.random.randn(3 , 4 ) lowerCamelCase_ = tf.constant(A_ ) self.assertTrue(np.allclose(transpose(A_ ) , transpose(A_ ).numpy() ) ) lowerCamelCase_ = np.random.randn(3 , 4 , 5 ) lowerCamelCase_ = tf.constant(A_ ) self.assertTrue(np.allclose(transpose(A_ , axes=(1, 2, 0) ) , transpose(A_ , axes=(1, 2, 0) ).numpy() ) ) @require_flax def a__ ( self : Any ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = np.random.randn(3 , 4 ) lowerCamelCase_ = jnp.array(A_ ) self.assertTrue(np.allclose(transpose(A_ ) , np.asarray(transpose(A_ ) ) ) ) lowerCamelCase_ = np.random.randn(3 , 4 , 5 ) lowerCamelCase_ = jnp.array(A_ ) self.assertTrue(np.allclose(transpose(A_ , axes=(1, 2, 0) ) , np.asarray(transpose(A_ , axes=(1, 2, 0) ) ) ) ) def a__ ( self : List[Any] ) -> Any: """simple docstring""" lowerCamelCase_ = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(A_ , (4, 3) ) , np.reshape(A_ , (4, 3) ) ) ) lowerCamelCase_ = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(A_ , (12, 5) ) , np.reshape(A_ , (12, 5) ) ) ) @require_torch def a__ ( self : Union[str, Any] ) -> List[Any]: """simple 
docstring""" lowerCamelCase_ = np.random.randn(3 , 4 ) lowerCamelCase_ = torch.tensor(A_ ) self.assertTrue(np.allclose(reshape(A_ , (4, 3) ) , reshape(A_ , (4, 3) ).numpy() ) ) lowerCamelCase_ = np.random.randn(3 , 4 , 5 ) lowerCamelCase_ = torch.tensor(A_ ) self.assertTrue(np.allclose(reshape(A_ , (12, 5) ) , reshape(A_ , (12, 5) ).numpy() ) ) @require_tf def a__ ( self : Any ) -> Any: """simple docstring""" lowerCamelCase_ = np.random.randn(3 , 4 ) lowerCamelCase_ = tf.constant(A_ ) self.assertTrue(np.allclose(reshape(A_ , (4, 3) ) , reshape(A_ , (4, 3) ).numpy() ) ) lowerCamelCase_ = np.random.randn(3 , 4 , 5 ) lowerCamelCase_ = tf.constant(A_ ) self.assertTrue(np.allclose(reshape(A_ , (12, 5) ) , reshape(A_ , (12, 5) ).numpy() ) ) @require_flax def a__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" lowerCamelCase_ = np.random.randn(3 , 4 ) lowerCamelCase_ = jnp.array(A_ ) self.assertTrue(np.allclose(reshape(A_ , (4, 3) ) , np.asarray(reshape(A_ , (4, 3) ) ) ) ) lowerCamelCase_ = np.random.randn(3 , 4 , 5 ) lowerCamelCase_ = jnp.array(A_ ) self.assertTrue(np.allclose(reshape(A_ , (12, 5) ) , np.asarray(reshape(A_ , (12, 5) ) ) ) ) def a__ ( self : Optional[Any] ) -> Any: """simple docstring""" lowerCamelCase_ = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(A_ ) , np.squeeze(A_ ) ) ) lowerCamelCase_ = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(A_ , axis=2 ) , np.squeeze(A_ , axis=2 ) ) ) @require_torch def a__ ( self : Tuple ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = np.random.randn(1 , 3 , 4 ) lowerCamelCase_ = torch.tensor(A_ ) self.assertTrue(np.allclose(squeeze(A_ ) , squeeze(A_ ).numpy() ) ) lowerCamelCase_ = np.random.randn(1 , 4 , 1 , 5 ) lowerCamelCase_ = torch.tensor(A_ ) self.assertTrue(np.allclose(squeeze(A_ , axis=2 ) , squeeze(A_ , axis=2 ).numpy() ) ) @require_tf def a__ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = np.random.randn(1 , 3 , 4 ) lowerCamelCase_ = tf.constant(A_ ) self.assertTrue(np.allclose(squeeze(A_ ) , squeeze(A_ ).numpy() ) ) lowerCamelCase_ = np.random.randn(1 , 4 , 1 , 5 ) lowerCamelCase_ = tf.constant(A_ ) self.assertTrue(np.allclose(squeeze(A_ , axis=2 ) , squeeze(A_ , axis=2 ).numpy() ) ) @require_flax def a__ ( self : List[str] ) -> str: """simple docstring""" lowerCamelCase_ = np.random.randn(1 , 3 , 4 ) lowerCamelCase_ = jnp.array(A_ ) self.assertTrue(np.allclose(squeeze(A_ ) , np.asarray(squeeze(A_ ) ) ) ) lowerCamelCase_ = np.random.randn(1 , 4 , 1 , 5 ) lowerCamelCase_ = jnp.array(A_ ) self.assertTrue(np.allclose(squeeze(A_ , axis=2 ) , np.asarray(squeeze(A_ , axis=2 ) ) ) ) def a__ ( self : List[Any] ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(A_ , axis=1 ) , np.expand_dims(A_ , axis=1 ) ) ) @require_torch def a__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = np.random.randn(3 , 4 ) lowerCamelCase_ = torch.tensor(A_ ) self.assertTrue(np.allclose(expand_dims(A_ , axis=1 ) , expand_dims(A_ , axis=1 ).numpy() ) ) @require_tf def a__ ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = np.random.randn(3 , 4 ) lowerCamelCase_ = tf.constant(A_ ) self.assertTrue(np.allclose(expand_dims(A_ , axis=1 ) , expand_dims(A_ , axis=1 ).numpy() ) ) @require_flax def a__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = np.random.randn(3 , 4 ) lowerCamelCase_ = jnp.array(A_ ) 
self.assertTrue(np.allclose(expand_dims(A_ , axis=1 ) , np.asarray(expand_dims(A_ , axis=1 ) ) ) )
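# A minimal usage sketch of the framework-agnostic helpers exercised by the tests
# above. It assumes `flatten_dict`, `transpose`, `reshape`, `squeeze` and
# `expand_dims` are importable from `transformers.utils` with the signatures the
# tests use; only NumPy inputs are shown so the sketch runs without torch/tf/jax.
import numpy as np
from transformers.utils import expand_dims, flatten_dict, reshape, squeeze, transpose

nested = {"model": {"encoder": {"layers": 12}, "decoder": {"layers": 6}}}
print(flatten_dict(nested))  # {'model.encoder.layers': 12, 'model.decoder.layers': 6}

x = np.random.randn(1, 3, 4)
print(transpose(x).shape)            # (4, 3, 1) - axes reversed, like np.transpose
print(reshape(x, (3, 4)).shape)      # (3, 4)
print(squeeze(x).shape)              # (3, 4) - the leading size-1 axis is dropped
print(expand_dims(x, axis=0).shape)  # (1, 1, 3, 4)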
651
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : str = logging.get_logger(__name__) lowerCamelCase : List[str] = { "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json", } class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = '''gpt_neox_japanese''' def __init__( self : int , A_ : Dict=32000 , A_ : List[Any]=2560 , A_ : Dict=32 , A_ : Union[str, Any]=32 , A_ : List[Any]=4 , A_ : List[str]="gelu" , A_ : Dict=1.00 , A_ : int=10000 , A_ : Dict=2048 , A_ : Dict=0.02 , A_ : Any=1E-5 , A_ : Union[str, Any]=True , A_ : int=31996 , A_ : List[str]=31999 , A_ : List[Any]=0.1 , A_ : List[Any]=0.0 , **A_ : Tuple , ) -> Dict: """simple docstring""" super().__init__(bos_token_id=A_ , eos_token_id=A_ , **A_ ) lowerCamelCase_ = vocab_size lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_multiple_size lowerCamelCase_ = hidden_act lowerCamelCase_ = rotary_pct lowerCamelCase_ = rotary_emb_base lowerCamelCase_ = initializer_range lowerCamelCase_ = layer_norm_eps lowerCamelCase_ = use_cache lowerCamelCase_ = attention_dropout lowerCamelCase_ = hidden_dropout
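# A short, illustrative sketch of how the config above is typically used:
# instantiate with defaults, override a couple of hyperparameters, and
# round-trip through a dict (the override values are arbitrary).
from transformers import GPTNeoXJapaneseConfig

config = GPTNeoXJapaneseConfig(hidden_size=1280, num_hidden_layers=16)
print(config.model_type)                # gpt_neox_japanese
print(config.to_dict()["hidden_size"])  # 1280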
651
1
from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class A( UpperCamelCase ): '''simple docstring''' def __init__( self : List[str] , A_ : TransformeraDModel , A_ : AutoencoderKL , A_ : KarrasDiffusionSchedulers , A_ : Optional[Dict[int, str]] = None , ) -> Tuple: """simple docstring""" super().__init__() self.register_modules(transformer=A_ , vae=A_ , scheduler=A_ ) # create a imagenet -> id dictionary for easier use lowerCamelCase_ = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split(',' ): lowerCamelCase_ = int(A_ ) lowerCamelCase_ = dict(sorted(self.labels.items() ) ) def a__ ( self : Dict , A_ : Union[str, List[str]] ) -> List[int]: """simple docstring""" if not isinstance(A_ , A_ ): lowerCamelCase_ = list(A_ ) for l in label: if l not in self.labels: raise ValueError( f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" ) return [self.labels[l] for l in label] @torch.no_grad() def __call__( self : int , A_ : List[int] , A_ : float = 4.0 , A_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A_ : int = 50 , A_ : Optional[str] = "pil" , A_ : bool = True , ) -> Union[ImagePipelineOutput, Tuple]: """simple docstring""" lowerCamelCase_ = len(A_ ) lowerCamelCase_ = self.transformer.config.sample_size lowerCamelCase_ = self.transformer.config.in_channels lowerCamelCase_ = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=A_ , device=self.device , dtype=self.transformer.dtype , ) lowerCamelCase_ = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents lowerCamelCase_ = torch.tensor(A_ , device=self.device ).reshape(-1 ) lowerCamelCase_ = torch.tensor([1000] * batch_size , device=self.device ) lowerCamelCase_ = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(A_ ) for t in self.progress_bar(self.scheduler.timesteps ): if guidance_scale > 1: lowerCamelCase_ = latent_model_input[: len(A_ ) // 2] lowerCamelCase_ = torch.cat([half, half] , dim=0 ) lowerCamelCase_ = self.scheduler.scale_model_input(A_ , A_ ) lowerCamelCase_ = t if not torch.is_tensor(A_ ): # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) lowerCamelCase_ = latent_model_input.device.type == 'mps' if isinstance(A_ , A_ ): lowerCamelCase_ = torch.floataa if is_mps else torch.floataa else: lowerCamelCase_ = torch.intaa if is_mps else torch.intaa lowerCamelCase_ = torch.tensor([timesteps] , dtype=A_ , device=latent_model_input.device ) elif len(timesteps.shape ) == 0: lowerCamelCase_ = timesteps[None].to(latent_model_input.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowerCamelCase_ = timesteps.expand(latent_model_input.shape[0] ) # predict noise model_output lowerCamelCase_ = self.transformer( A_ , timestep=A_ , class_labels=A_ ).sample # perform guidance if guidance_scale > 1: lowerCamelCase_ , lowerCamelCase_ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] lowerCamelCase_ , lowerCamelCase_ = torch.split(A_ , len(A_ ) // 2 , dim=0 ) lowerCamelCase_ = uncond_eps + guidance_scale * (cond_eps - uncond_eps) lowerCamelCase_ = torch.cat([half_eps, half_eps] , dim=0 ) lowerCamelCase_ = torch.cat([eps, rest] , dim=1 ) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: lowerCamelCase_ , lowerCamelCase_ = torch.split(A_ , A_ , dim=1 ) else: lowerCamelCase_ = noise_pred # compute previous image: x_t -> x_t-1 lowerCamelCase_ = self.scheduler.step(A_ , A_ , A_ ).prev_sample if guidance_scale > 1: lowerCamelCase_ , lowerCamelCase_ = latent_model_input.chunk(2 , dim=0 ) else: lowerCamelCase_ = latent_model_input lowerCamelCase_ = 1 / self.vae.config.scaling_factor * latents lowerCamelCase_ = self.vae.decode(A_ ).sample lowerCamelCase_ = (samples / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 lowerCamelCase_ = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": lowerCamelCase_ = self.numpy_to_pil(A_ ) if not return_dict: return (samples,) return ImagePipelineOutput(images=A_ )
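# The guidance branch above implements standard classifier-free guidance,
# eps = eps_uncond + s * (eps_cond - eps_uncond). A tiny NumPy sketch of just
# that arithmetic, detached from the pipeline (the values are made up):
import numpy as np

cond_eps = np.array([0.5, -0.2])
uncond_eps = np.array([0.1, 0.1])
guidance_scale = 4.0
print(uncond_eps + guidance_scale * (cond_eps - uncond_eps))
# [ 1.7 -1.1] - the prediction is pushed away from the unconditional branch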
651
import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow lowerCamelCase : List[Any] = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ "text-classification", "language-modeling", "summarization", "token-classification", "question-answering", ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) lowerCamelCase : Tuple = logging.getLogger() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument('-f' ) lowerCamelCase_ = parser.parse_args() return args.f def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Dict="eval" ): '''simple docstring''' lowerCamelCase_ = os.path.join(lowercase , f"""{split}_results.json""" ) if os.path.exists(lowercase ): with open(lowercase , 'r' ) as f: return json.load(lowercase ) raise ValueError(f"""can't find {path}""" ) lowerCamelCase : str = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class A( UpperCamelCase ): '''simple docstring''' def a__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --eval_steps=2 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(A_ , 'argv' , A_ ): run_flax_glue.main() lowerCamelCase_ = get_results(A_ ) self.assertGreaterEqual(result['eval_accuracy'] , 0.75 ) @slow def a__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_clm_flax.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --block_size 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(A_ , 'argv' , A_ ): run_clm_flax.main() lowerCamelCase_ = get_results(A_ ) self.assertLess(result['eval_perplexity'] , 100 ) @slow def a__ ( self : str ) -> Tuple: """simple docstring""" lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_summarization.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --test_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=8 --do_train --do_eval --do_predict --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --predict_with_generate """.split() with patch.object(A_ , 'argv' , A_ ): run_summarization_flax.main() lowerCamelCase_ = get_results(A_ , split='test' ) self.assertGreaterEqual(result['test_rouge1'] , 10 ) self.assertGreaterEqual(result['test_rouge2'] , 2 ) self.assertGreaterEqual(result['test_rougeL'] , 7 ) self.assertGreaterEqual(result['test_rougeLsum'] , 7 ) @slow def a__ ( 
self : Optional[int] ) -> str: """simple docstring""" lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_mlm.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --overwrite_output_dir --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --logging_steps 2 --eval_steps 2 --do_train --do_eval --num_train_epochs=1 """.split() with patch.object(A_ , 'argv' , A_ ): run_mlm_flax.main() lowerCamelCase_ = get_results(A_ ) self.assertLess(result['eval_perplexity'] , 42 ) @slow def a__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_t5_mlm_flax.py --model_name_or_path t5-small --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(A_ , 'argv' , A_ ): run_ta_mlm_flax.main() lowerCamelCase_ = get_results(A_ ) self.assertGreaterEqual(result['eval_accuracy'] , 0.42 ) @slow def a__ ( self : int ) -> Tuple: """simple docstring""" lowerCamelCase_ = 7 if get_gpu_count() > 1 else 2 lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_flax_ner.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --warmup_steps=2 --learning_rate=2e-4 --logging_steps 2 --eval_steps 2 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 """.split() with patch.object(A_ , 'argv' , A_ ): run_flax_ner.main() lowerCamelCase_ = get_results(A_ ) self.assertGreaterEqual(result['eval_accuracy'] , 0.75 ) self.assertGreaterEqual(result['eval_f1'] , 0.3 ) @slow def a__ ( self : str ) -> int: """simple docstring""" lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_qa.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=2 --do_train --do_eval --logging_steps 2 --eval_steps 2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(A_ , 'argv' , A_ ): run_qa.main() lowerCamelCase_ = get_results(A_ ) self.assertGreaterEqual(result['eval_f1'] , 30 ) self.assertGreaterEqual(result['eval_exact'] , 30 )
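# Every test above follows the same pattern: build an argv list, patch
# `sys.argv`, and invoke the example script's `main()`. A stripped-down sketch
# of that pattern, with `my_script` standing in as a hypothetical module:
import sys
from unittest.mock import patch

testargs = "my_script.py --output_dir /tmp/out --seed 42".split()
with patch.object(sys, "argv", testargs):
    print(sys.argv)  # ['my_script.py', '--output_dir', '/tmp/out', '--seed', '42']
    # my_script.main() would parse the patched argv here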
651
1
import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator def _SCREAMING_SNAKE_CASE ( lowercase : Dict ): '''simple docstring''' lowerCamelCase_ = fname.split(os.path.sep )[-1] return re.search(r'^(.*)_\d+\.jpg$' , lowercase ).groups()[0] class A( UpperCamelCase ): '''simple docstring''' def __init__( self : int , A_ : List[Any] , A_ : Union[str, Any]=None , A_ : str=None ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = file_names lowerCamelCase_ = image_transform lowerCamelCase_ = label_to_id def __len__( self : List[str] ) -> Optional[int]: """simple docstring""" return len(self.file_names ) def __getitem__( self : int , A_ : Union[str, Any] ) -> List[str]: """simple docstring""" lowerCamelCase_ = self.file_names[idx] lowerCamelCase_ = PIL.Image.open(A_ ) lowerCamelCase_ = raw_image.convert('RGB' ) if self.image_transform is not None: lowerCamelCase_ = self.image_transform(A_ ) lowerCamelCase_ = extract_label(A_ ) if self.label_to_id is not None: lowerCamelCase_ = self.label_to_id[label] return {"image": image, "label": label} def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] , lowercase : List[Any] ): '''simple docstring''' if args.with_tracking: lowerCamelCase_ = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir ) else: lowerCamelCase_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCamelCase_ = config['lr'] lowerCamelCase_ = int(config['num_epochs'] ) lowerCamelCase_ = int(config['seed'] ) lowerCamelCase_ = int(config['batch_size'] ) lowerCamelCase_ = config['image_size'] if not isinstance(lowercase , (list, tuple) ): lowerCamelCase_ = (image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps , 'isdigit' ): if args.checkpointing_steps == "epoch": lowerCamelCase_ = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): lowerCamelCase_ = int(args.checkpointing_steps ) else: raise ValueError( f"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" ) else: lowerCamelCase_ = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: lowerCamelCase_ = os.path.split(lowercase )[-1].split('.' )[0] accelerator.init_trackers(lowercase , lowercase ) # Grab all the image filenames lowerCamelCase_ = [os.path.join(args.data_dir , lowercase ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )] # Build the label correspondences lowerCamelCase_ = [extract_label(lowercase ) for fname in file_names] lowerCamelCase_ = list(set(lowercase ) ) id_to_label.sort() lowerCamelCase_ = {lbl: i for i, lbl in enumerate(lowercase )} # Set the seed before splitting the data. 
np.random.seed(lowercase ) torch.manual_seed(lowercase ) torch.cuda.manual_seed_all(lowercase ) # Split our filenames between train and validation lowerCamelCase_ = np.random.permutation(len(lowercase ) ) lowerCamelCase_ = int(0.8 * len(lowercase ) ) lowerCamelCase_ = random_perm[:cut] lowerCamelCase_ = random_perm[cut:] # For training we use a simple RandomResizedCrop lowerCamelCase_ = Compose([RandomResizedCrop(lowercase , scale=(0.5, 1.0) ), ToTensor()] ) lowerCamelCase_ = PetsDataset( [file_names[i] for i in train_split] , image_transform=lowercase , label_to_id=lowercase ) # For evaluation, we use a deterministic Resize lowerCamelCase_ = Compose([Resize(lowercase ), ToTensor()] ) lowerCamelCase_ = PetsDataset([file_names[i] for i in eval_split] , image_transform=lowercase , label_to_id=lowercase ) # Instantiate dataloaders. lowerCamelCase_ = DataLoader(lowercase , shuffle=lowercase , batch_size=lowercase , num_workers=4 ) lowerCamelCase_ = DataLoader(lowercase , shuffle=lowercase , batch_size=lowercase , num_workers=4 ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCamelCase_ = create_model('resnet50d' , pretrained=lowercase , num_classes=len(lowercase ) ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCamelCase_ = model.to(accelerator.device ) # Freezing the base model for param in model.parameters(): lowerCamelCase_ = False for param in model.get_classifier().parameters(): lowerCamelCase_ = True # We normalize the batches of images to be a bit faster. lowerCamelCase_ = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device ) lowerCamelCase_ = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device ) # Instantiate optimizer lowerCamelCase_ = torch.optim.Adam(params=model.parameters() , lr=lr / 25 ) # Instantiate learning rate scheduler lowerCamelCase_ = OneCycleLR(optimizer=lowercase , max_lr=lowercase , epochs=lowercase , steps_per_epoch=len(lowercase ) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = accelerator.prepare( lowercase , lowercase , lowercase , lowercase , lowercase ) # We need to keep track of how many total steps we have iterated over lowerCamelCase_ = 0 # We also need to keep track of the starting epoch so files are named properly lowerCamelCase_ = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(f"""Resumed from checkpoint: {args.resume_from_checkpoint}""" ) accelerator.load_state(args.resume_from_checkpoint ) lowerCamelCase_ = os.path.basename(args.resume_from_checkpoint ) else: # Get the most recent checkpoint lowerCamelCase_ = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()] dirs.sort(key=os.path.getctime ) lowerCamelCase_ = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` lowerCamelCase_ = os.path.splitext(lowercase )[0] if "epoch" in training_difference: lowerCamelCase_ = int(training_difference.replace('epoch_' , '' ) ) + 1 lowerCamelCase_ = None else: lowerCamelCase_ = int(training_difference.replace('step_' , '' ) ) lowerCamelCase_ = resume_step // len(lowercase ) resume_step -= starting_epoch * len(lowercase ) # Now we train the model for epoch in range(lowercase , lowercase ): model.train() if args.with_tracking: lowerCamelCase_ = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step lowerCamelCase_ = accelerator.skip_first_batches(lowercase , lowercase ) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader lowerCamelCase_ = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. lowerCamelCase_ = {k: v.to(accelerator.device ) for k, v in batch.items()} lowerCamelCase_ = (batch['image'] - mean) / std lowerCamelCase_ = model(lowercase ) lowerCamelCase_ = torch.nn.functional.cross_entropy(lowercase , batch['label'] ) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(lowercase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(lowercase , lowercase ): lowerCamelCase_ = f"""step_{overall_step}""" if overall_step % checkpointing_steps == 0: if args.output_dir is not None: lowerCamelCase_ = os.path.join(args.output_dir , lowercase ) accelerator.save_state(lowercase ) model.eval() lowerCamelCase_ = 0 lowerCamelCase_ = 0 for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. lowerCamelCase_ = {k: v.to(accelerator.device ) for k, v in batch.items()} lowerCamelCase_ = (batch['image'] - mean) / std with torch.no_grad(): lowerCamelCase_ = model(lowercase ) lowerCamelCase_ = outputs.argmax(dim=-1 ) lowerCamelCase_ , lowerCamelCase_ = accelerator.gather_for_metrics((predictions, batch['label']) ) lowerCamelCase_ = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() lowerCamelCase_ = accurate.item() / num_elems # Use accelerator.print to print only on the main process. 
accelerator.print(f"""epoch {epoch}: {1_00 * eval_metric:.2f}""" ) if args.with_tracking: accelerator.log( { 'accuracy': 1_00 * eval_metric, 'train_loss': total_loss.item() / len(lowercase ), 'epoch': epoch, } , step=lowercase , ) if checkpointing_steps == "epoch": lowerCamelCase_ = f"""epoch_{epoch}""" if args.output_dir is not None: lowerCamelCase_ = os.path.join(args.output_dir , lowercase ) accelerator.save_state(lowercase ) if args.with_tracking: accelerator.end_training() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument('--data_dir' , required=lowercase , help='The data folder on disk.' ) parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' ) parser.add_argument( '--mixed_precision' , type=lowercase , default=lowercase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) parser.add_argument( '--checkpointing_steps' , type=lowercase , default=lowercase , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , ) parser.add_argument( '--output_dir' , type=lowercase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--resume_from_checkpoint' , type=lowercase , default=lowercase , help='If the training should continue from a checkpoint folder.' , ) parser.add_argument( '--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , ) parser.add_argument( '--project_dir' , type=lowercase , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , ) lowerCamelCase_ = parser.parse_args() lowerCamelCase_ = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 2_24} training_function(lowercase , lowercase ) if __name__ == "__main__": main()
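# The training loop above normalizes batches with broadcast per-channel
# statistics: tensors shaped [1, C, 1, 1] combine with batches shaped
# [B, C, H, W]. A small self-contained sketch of that broadcasting (the
# mean/std values are the common ImageNet ones, used here only as an example;
# the script itself reads them from the timm model's default_cfg):
import torch

batch = torch.rand(2, 3, 8, 8)  # [B, C, H, W]
mean = torch.tensor([0.485, 0.456, 0.406])[None, :, None, None]
std = torch.tensor([0.229, 0.224, 0.225])[None, :, None, None]
normalized = (batch - mean) / std
print(normalized.shape)  # torch.Size([2, 3, 8, 8])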
651
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The number of nodes must equal the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
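# A small usage sketch for `distribute_coins` above: a root holding 3 coins
# with two empty leaf children needs exactly two moves (one coin to each leaf).
example_root = TreeNode(3, TreeNode(0), TreeNode(0))
print(distribute_coins(example_root))  # 2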
651
1
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCamelCase : Tuple = logging.get_logger(__name__) lowerCamelCase : Union[str, Any] = { "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json", } class A( UpperCamelCase , UpperCamelCase ): '''simple docstring''' UpperCamelCase = '''bit''' UpperCamelCase = ['''preactivation''', '''bottleneck'''] UpperCamelCase = ['''SAME''', '''VALID'''] def __init__( self : Optional[Any] , A_ : int=3 , A_ : Any=64 , A_ : Optional[int]=[256, 512, 1024, 2048] , A_ : str=[3, 4, 6, 3] , A_ : Any="preactivation" , A_ : Optional[int]="relu" , A_ : List[Any]=None , A_ : Tuple=32 , A_ : List[str]=0.0 , A_ : str=False , A_ : Optional[int]=32 , A_ : List[str]=1 , A_ : List[str]=None , A_ : str=None , **A_ : int , ) -> Union[str, Any]: """simple docstring""" super().__init__(**A_ ) if layer_type not in self.layer_types: raise ValueError(f"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" ) if global_padding is not None: if global_padding.upper() in self.supported_padding: lowerCamelCase_ = global_padding.upper() else: raise ValueError(f"""Padding strategy {global_padding} not supported""" ) lowerCamelCase_ = num_channels lowerCamelCase_ = embedding_size lowerCamelCase_ = hidden_sizes lowerCamelCase_ = depths lowerCamelCase_ = layer_type lowerCamelCase_ = hidden_act lowerCamelCase_ = global_padding lowerCamelCase_ = num_groups lowerCamelCase_ = drop_path_rate lowerCamelCase_ = embedding_dynamic_padding lowerCamelCase_ = output_stride lowerCamelCase_ = width_factor lowerCamelCase_ = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(A_ ) + 1 )] lowerCamelCase_ , lowerCamelCase_ = get_aligned_output_features_output_indices( out_features=A_ , out_indices=A_ , stage_names=self.stage_names )
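# A brief sketch of the validation logic in the config above: a supported
# `global_padding` value is normalized to upper case, and stage names are
# derived from the depths. Assumes `BitConfig` is importable from a recent
# transformers release.
from transformers import BitConfig

config = BitConfig(global_padding="same")
print(config.global_padding)  # SAME
print(config.stage_names)     # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']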
651
from manim import * class A( UpperCamelCase ): '''simple docstring''' def a__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" lowerCamelCase_ = Rectangle(height=0.5 , width=0.5 ) lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) lowerCamelCase_ = Rectangle(height=0.25 , width=0.25 ) lowerCamelCase_ = [mem.copy() for i in range(6 )] lowerCamelCase_ = [mem.copy() for i in range(6 )] lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = Text('CPU' , font_size=24 ) lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(A_ ) lowerCamelCase_ = [mem.copy() for i in range(4 )] lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = Text('GPU' , font_size=24 ) lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ ) gpu.move_to([-1, -1, 0] ) self.add(A_ ) lowerCamelCase_ = [mem.copy() for i in range(6 )] lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = Text('Model' , font_size=24 ) lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ ) model.move_to([3, -1.0, 0] ) self.add(A_ ) lowerCamelCase_ = [] lowerCamelCase_ = [] for i, rect in enumerate(A_ ): lowerCamelCase_ = fill.copy().set_fill(A_ , opacity=0.8 ) target.move_to(A_ ) model_arr.append(A_ ) lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(A_ ) self.add(*A_ , *A_ ) lowerCamelCase_ = [meta_mem.copy() for i in range(6 )] lowerCamelCase_ = [meta_mem.copy() for i in range(6 )] lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = Text('Disk' , font_size=24 ) lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ ) disk.move_to([-4, -1.25, 0] ) self.add(A_ , A_ ) lowerCamelCase_ = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowerCamelCase_ = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(A_ , A_ ) lowerCamelCase_ = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(A_ ) lowerCamelCase_ = MarkupText( f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(A_ ) ) lowerCamelCase_ = Square(0.3 ) input.set_fill(A_ , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , A_ , buff=0.5 ) self.play(Write(A_ ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=A_ , buff=0.02 ) self.play(MoveToTarget(A_ ) ) self.play(FadeOut(A_ ) ) lowerCamelCase_ = Arrow(start=A_ , end=A_ , color=A_ , buff=0.5 ) a.next_to(model_arr[0].get_left() , A_ , buff=0.2 ) model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) lowerCamelCase_ = MarkupText( f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(A_ , run_time=3 ) ) lowerCamelCase_ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02} self.play( Write(A_ 
) , Circumscribe(model_arr[0] , color=A_ , **A_ ) , Circumscribe(model_cpu_arr[0] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) lowerCamelCase_ = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , A_ , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) lowerCamelCase_ = AnimationGroup( FadeOut(A_ , run_time=0.5 ) , MoveToTarget(A_ , run_time=0.5 ) , FadeIn(A_ , run_time=0.5 ) , lag_ratio=0.2 ) self.play(A_ ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: lowerCamelCase_ = 0.7 self.play( Circumscribe(model_arr[i] , **A_ ) , Circumscribe(cpu_left_col_base[i] , **A_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , Circumscribe(model_arr[i + 1] , color=A_ , **A_ ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=A_ , **A_ ) , Circumscribe(cpu_left_col_base[-1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) lowerCamelCase_ = a_c lowerCamelCase_ = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(A_ ) , FadeOut(A_ , run_time=0.5 ) , ) lowerCamelCase_ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(A_ , run_time=3 ) , MoveToTarget(A_ ) ) self.wait()
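# A minimal Manim sketch of the primitives the scene above is built from:
# rectangles copied into a VGroup, arranged in a row, labeled, and animated.
# The class name and layout are illustrative; render with e.g.
# `manim -ql file.py BlockDemo` (API as of recent Manim Community releases).
from manim import DOWN, RIGHT, Create, Rectangle, Scene, Text, VGroup, Write


class BlockDemo(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        blocks = VGroup(*[mem.copy() for _ in range(4)]).arrange(RIGHT, buff=0)
        label = Text("GPU", font_size=24).next_to(blocks, DOWN)
        self.play(Create(blocks), Write(label))
        self.wait()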
651
1
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase : List[Any] = logging.get_logger(__name__) def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int] ): '''simple docstring''' lowerCamelCase_ = 'huggingface/label-files' lowerCamelCase_ = 'imagenet-1k-id2label.json' lowerCamelCase_ = json.load(open(hf_hub_download(lowercase , lowercase , repo_type='dataset' ) , 'r' ) ) lowerCamelCase_ = {int(lowercase ): v for k, v in idalabel.items()} lowerCamelCase_ = {v: k for k, v in idalabel.items()} lowerCamelCase_ = 'std_conv' if 'bit' in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" lowerCamelCase_ = BitConfig( conv_layer=lowercase , num_labels=10_00 , idalabel=lowercase , labelaid=lowercase , ) return config def _SCREAMING_SNAKE_CASE ( lowercase : Dict ): '''simple docstring''' if "stem.conv" in name: lowerCamelCase_ = name.replace('stem.conv' , 'bit.embedder.convolution' ) if "blocks" in name: lowerCamelCase_ = name.replace('blocks' , 'layers' ) if "head.fc" in name: lowerCamelCase_ = name.replace('head.fc' , 'classifier.1' ) if name.startswith('norm' ): lowerCamelCase_ = 'bit.' + name if "bit" not in name and "classifier" not in name: lowerCamelCase_ = 'bit.encoder.' 
+ name return name def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowerCamelCase_ = Image.open(requests.get(lowercase , stream=lowercase ).raw ) return im @torch.no_grad() def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] , lowercase : Dict , lowercase : Dict=False ): '''simple docstring''' lowerCamelCase_ = get_config(lowercase ) # load original model from timm lowerCamelCase_ = create_model(lowercase , pretrained=lowercase ) timm_model.eval() # load state_dict of original model lowerCamelCase_ = timm_model.state_dict() for key in state_dict.copy().keys(): lowerCamelCase_ = state_dict.pop(lowercase ) lowerCamelCase_ = val.squeeze() if 'head' in key else val # load HuggingFace model lowerCamelCase_ = BitForImageClassification(lowercase ) model.eval() model.load_state_dict(lowercase ) # create image processor lowerCamelCase_ = create_transform(**resolve_data_config({} , model=lowercase ) ) lowerCamelCase_ = transform.transforms lowerCamelCase_ = { 'bilinear': PILImageResampling.BILINEAR, 'bicubic': PILImageResampling.BICUBIC, 'nearest': PILImageResampling.NEAREST, } lowerCamelCase_ = BitImageProcessor( do_resize=lowercase , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowercase , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) lowerCamelCase_ = prepare_img() lowerCamelCase_ = transform(lowercase ).unsqueeze(0 ) lowerCamelCase_ = processor(lowercase , return_tensors='pt' ).pixel_values # verify pixel values assert torch.allclose(lowercase , lowercase ) # verify logits with torch.no_grad(): lowerCamelCase_ = model(lowercase ) lowerCamelCase_ = outputs.logits print('Logits:' , logits[0, :3] ) print('Predicted class:' , model.config.idalabel[logits.argmax(-1 ).item()] ) lowerCamelCase_ = timm_model(lowercase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(lowercase , outputs.logits , atol=1e-3 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: Path(lowercase ).mkdir(exist_ok=lowercase ) print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowercase ) processor.save_pretrained(lowercase ) if push_to_hub: print(f"""Pushing model {model_name} and processor to the hub""" ) model.push_to_hub(f"""ybelkada/{model_name}""" ) processor.push_to_hub(f"""ybelkada/{model_name}""" ) if __name__ == "__main__": lowerCamelCase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="resnetv2_50x1_bitm", type=str, help="Name of the BiT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model to the hub.", ) lowerCamelCase : Union[str, Any] = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
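# The conversion scripts here share one pattern: walk the original state dict,
# map each key to the new naming scheme, and reinsert the tensor. A tiny
# self-contained sketch of that rename loop, reduced to two of the
# substitutions used above, over a made-up two-entry dict:
import torch

state_dict = {"stem.conv.weight": torch.zeros(2, 2), "head.fc.bias": torch.zeros(2)}


def rename_key_sketch(name: str) -> str:
    return name.replace("stem.conv", "bit.embedder.convolution").replace(
        "head.fc", "classifier.1"
    )


state_dict = {rename_key_sketch(k): v for k, v in state_dict.items()}
print(sorted(state_dict))  # ['bit.embedder.convolution.weight', 'classifier.1.bias']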
651
import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available from . import BaseDiffusersCLICommand def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] ): '''simple docstring''' return EnvironmentCommand() class A( UpperCamelCase ): '''simple docstring''' @staticmethod def a__ ( A_ : ArgumentParser ) -> str: """simple docstring""" lowerCamelCase_ = parser.add_parser('env' ) download_parser.set_defaults(func=A_ ) def a__ ( self : Optional[Any] ) -> Any: """simple docstring""" lowerCamelCase_ = huggingface_hub.__version__ lowerCamelCase_ = 'not installed' lowerCamelCase_ = 'NA' if is_torch_available(): import torch lowerCamelCase_ = torch.__version__ lowerCamelCase_ = torch.cuda.is_available() lowerCamelCase_ = 'not installed' if is_transformers_available(): import transformers lowerCamelCase_ = transformers.__version__ lowerCamelCase_ = 'not installed' if is_accelerate_available(): import accelerate lowerCamelCase_ = accelerate.__version__ lowerCamelCase_ = 'not installed' if is_xformers_available(): import xformers lowerCamelCase_ = xformers.__version__ lowerCamelCase_ = { '`diffusers` version': version, 'Platform': platform.platform(), 'Python version': platform.python_version(), 'PyTorch version (GPU?)': f"""{pt_version} ({pt_cuda_available})""", 'Huggingface_hub version': hub_version, 'Transformers version': transformers_version, 'Accelerate version': accelerate_version, 'xFormers version': xformers_version, 'Using GPU in script?': '<fill in>', 'Using distributed or parallel set-up in script?': '<fill in>', } print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' ) print(self.format_dict(A_ ) ) return info @staticmethod def a__ ( A_ : Dict ) -> Any: """simple docstring""" return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
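# The `format_dict` staticmethod above just renders key/value pairs as a
# dashed list. The same formatting, reproduced standalone with sample values:
info = {"Platform": "Linux", "Python version": "3.10.12"}
print("\n".join(f"- {prop}: {val}" for prop, val in info.items()) + "\n")
# - Platform: Linux
# - Python version: 3.10.12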
651
1
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return the word's letters, sorted into a canonical signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every anagram of the given word."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {
        word: anagram(word) for word in word_list if len(anagram(word)) > 1
    }
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
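# Worked example of the signature trick above: anagrams share the same
# sorted-letter signature, so they land in the same defaultdict bucket.
print(signature("dog"), signature("god"))   # dgo dgo
print(word_by_signature[signature("dog")])  # e.g. ['dog', 'god'] if both appear in words.txt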
651
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
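# Worked check of the digit-cancelling test above: 49/98 "cancels" the shared
# 9 to give 4/8, and 4/8 == 49/98 == 1/2, so it qualifies; 12/24 shares a 2,
# but 1/4 != 12/24, so it does not.
print(is_digit_cancelling(49, 98))  # True
print(is_digit_cancelling(12, 24))  # False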
651
1
from ...configuration_utils import PretrainedConfig lowerCamelCase : List[Any] = { "google/tapas-base-finetuned-sqa": ( "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json" ), "google/tapas-base-finetuned-wtq": ( "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json" ), "google/tapas-base-finetuned-wikisql-supervised": ( "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json" ), "google/tapas-base-finetuned-tabfact": ( "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json" ), } class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = '''tapas''' def __init__( self : int , A_ : str=30522 , A_ : Tuple=768 , A_ : List[str]=12 , A_ : int=12 , A_ : Tuple=3072 , A_ : List[Any]="gelu" , A_ : int=0.1 , A_ : Dict=0.1 , A_ : List[Any]=1024 , A_ : Optional[Any]=[3, 256, 256, 2, 256, 256, 10] , A_ : Optional[Any]=0.02 , A_ : Any=1E-12 , A_ : List[Any]=0 , A_ : Optional[int]=10.0 , A_ : int=0 , A_ : Any=1.0 , A_ : Tuple=None , A_ : List[str]=1.0 , A_ : Optional[Any]=False , A_ : Tuple=None , A_ : Optional[Any]=1.0 , A_ : List[str]=1.0 , A_ : int=False , A_ : Optional[Any]=False , A_ : Union[str, Any]="ratio" , A_ : Any=None , A_ : int=None , A_ : str=64 , A_ : Any=32 , A_ : List[Any]=False , A_ : List[Any]=True , A_ : List[str]=False , A_ : Union[str, Any]=False , A_ : str=True , A_ : Tuple=False , A_ : List[Any]=None , A_ : Any=None , **A_ : Tuple , ) -> int: """simple docstring""" super().__init__(pad_token_id=A_ , **A_ ) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = hidden_act lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = type_vocab_sizes lowerCamelCase_ = initializer_range lowerCamelCase_ = layer_norm_eps # Fine-tuning task hyperparameters lowerCamelCase_ = positive_label_weight lowerCamelCase_ = num_aggregation_labels lowerCamelCase_ = aggregation_loss_weight lowerCamelCase_ = use_answer_as_supervision lowerCamelCase_ = answer_loss_importance lowerCamelCase_ = use_normalized_answer_loss lowerCamelCase_ = huber_loss_delta lowerCamelCase_ = temperature lowerCamelCase_ = aggregation_temperature lowerCamelCase_ = use_gumbel_for_cells lowerCamelCase_ = use_gumbel_for_aggregation lowerCamelCase_ = average_approximation_function lowerCamelCase_ = cell_selection_preference lowerCamelCase_ = answer_loss_cutoff lowerCamelCase_ = max_num_rows lowerCamelCase_ = max_num_columns lowerCamelCase_ = average_logits_per_cell lowerCamelCase_ = select_one_column lowerCamelCase_ = allow_empty_column_selection lowerCamelCase_ = init_cell_selection_weights_to_zero lowerCamelCase_ = reset_position_index_per_cell lowerCamelCase_ = disable_per_token_loss # Aggregation hyperparameters lowerCamelCase_ = aggregation_labels lowerCamelCase_ = no_aggregation_label_index if isinstance(self.aggregation_labels , A_ ): lowerCamelCase_ = {int(A_ ): v for k, v in aggregation_labels.items()}
651
from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging lowerCamelCase : List[Any] = logging.get_logger(__name__) class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = ['''pixel_values'''] def __init__( self : List[Any] , A_ : bool = True , A_ : Dict[str, int] = None , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : bool = True , A_ : Dict[str, int] = None , A_ : bool = True , A_ : Union[int, float] = 1 / 255 , A_ : bool = True , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **A_ : Tuple , ) -> None: """simple docstring""" super().__init__(**A_ ) lowerCamelCase_ = size if size is not None else {'shortest_edge': 224} lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ ) lowerCamelCase_ = crop_size if crop_size is not None else {'height': 224, 'width': 224} lowerCamelCase_ = get_size_dict(A_ , param_name='crop_size' ) lowerCamelCase_ = do_resize lowerCamelCase_ = size lowerCamelCase_ = resample lowerCamelCase_ = do_center_crop lowerCamelCase_ = crop_size lowerCamelCase_ = do_rescale lowerCamelCase_ = rescale_factor lowerCamelCase_ = do_normalize lowerCamelCase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN lowerCamelCase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Dict[str, int] , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Tuple , ) -> np.ndarray: """simple docstring""" lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: lowerCamelCase_ = int((256 / 224) * size['shortest_edge'] ) lowerCamelCase_ = get_resize_output_image_size(A_ , size=A_ , default_to_square=A_ ) lowerCamelCase_ = {'height': output_size[0], 'width': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" ) return resize( A_ , size=(size_dict['height'], size_dict['width']) , resample=A_ , data_format=A_ , **A_ ) def a__ ( self : Any , A_ : np.ndarray , A_ : Dict[str, int] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Any , ) -> np.ndarray: """simple docstring""" lowerCamelCase_ = get_size_dict(A_ ) if "height" not in size or "width" not in size: raise ValueError(f"""Size dict must have keys 'height' and 'width'. 
Got {size.keys()}""" ) return center_crop(A_ , size=(size['height'], size['width']) , data_format=A_ , **A_ ) def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Union[int, float] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Optional[int] , ) -> np.ndarray: """simple docstring""" return rescale(A_ , scale=A_ , data_format=A_ , **A_ ) def a__ ( self : List[str] , A_ : np.ndarray , A_ : Union[float, List[float]] , A_ : Union[float, List[float]] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : str , ) -> np.ndarray: """simple docstring""" return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ ) def a__ ( self : Optional[int] , A_ : ImageInput , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : PILImageResampling = None , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : Optional[bool] = None , A_ : Optional[float] = None , A_ : Optional[bool] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[TensorType] = None , A_ : ChannelDimension = ChannelDimension.FIRST , **A_ : List[Any] , ) -> BatchFeature: """simple docstring""" lowerCamelCase_ = do_resize if do_resize is not None else self.do_resize lowerCamelCase_ = resample if resample is not None else self.resample lowerCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase_ = image_mean if image_mean is not None else self.image_mean lowerCamelCase_ = image_std if image_std is not None else self.image_std lowerCamelCase_ = size if size is not None else self.size lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ ) lowerCamelCase_ = crop_size if crop_size is not None else self.crop_size lowerCamelCase_ = get_size_dict(A_ , param_name='crop_size' ) lowerCamelCase_ = make_list_of_images(A_ ) if not valid_images(A_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. lowerCamelCase_ = [to_numpy_array(A_ ) for image in images] if do_resize: lowerCamelCase_ = [self.resize(A_ , A_ , A_ ) for image in images] if do_center_crop: lowerCamelCase_ = [self.center_crop(A_ , A_ ) for image in images] if do_rescale: lowerCamelCase_ = [self.rescale(A_ , A_ ) for image in images] if do_normalize: lowerCamelCase_ = [self.normalize(A_ , A_ , A_ ) for image in images] lowerCamelCase_ = [to_channel_dimension_format(A_ , A_ ) for image in images] lowerCamelCase_ = {'pixel_values': images} return BatchFeature(data=A_ , tensor_type=A_ )
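# The resize step above enlarges `shortest_edge` by 256/224 before the final
# center crop (the standard recipe for these 224px checkpoints). A quick check
# of that arithmetic for the default size:
shortest_edge = 224
print(int((256 / 224) * shortest_edge))  # 256 - short side becomes 256, then a 224x224 crop is taken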
651
1
import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Optional[Any] ): '''simple docstring''' lowerCamelCase_ = old_name if "patch_embed" in old_name: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = old_name.split('.' ) if layer == "0": lowerCamelCase_ = old_name.replace('0' , 'convolution1' ) elif layer == "1": lowerCamelCase_ = old_name.replace('1' , 'batchnorm_before' ) elif layer == "3": lowerCamelCase_ = old_name.replace('3' , 'convolution2' ) else: lowerCamelCase_ = old_name.replace('4' , 'batchnorm_after' ) if "network" in old_name and re.search(r'\d\.\d' , lowercase ): lowerCamelCase_ = r'\b\d{2}\b' if bool(re.search(lowercase , lowercase ) ): lowerCamelCase_ = re.search(r'\d\.\d\d.' , lowercase ).group() else: lowerCamelCase_ = re.search(r'\d\.\d.' , lowercase ).group() if int(match[0] ) < 6: lowerCamelCase_ = old_name.replace(lowercase , '' ) lowerCamelCase_ = trimmed_name.replace('network' , match[0] + '.meta4D_layers.blocks.' + match[2:-1] ) lowerCamelCase_ = 'intermediate_stages.' + trimmed_name else: lowerCamelCase_ = old_name.replace(lowercase , '' ) if int(match[2] ) < num_meta4D_last_stage: lowerCamelCase_ = trimmed_name.replace('network' , 'meta4D_layers.blocks.' + match[2] ) else: lowerCamelCase_ = str(int(match[2] ) - num_meta4D_last_stage ) lowerCamelCase_ = trimmed_name.replace('network' , 'meta3D_layers.blocks.' + layer_index ) if "norm1" in old_name: lowerCamelCase_ = trimmed_name.replace('norm1' , 'layernorm1' ) elif "norm2" in old_name: lowerCamelCase_ = trimmed_name.replace('norm2' , 'layernorm2' ) elif "fc1" in old_name: lowerCamelCase_ = trimmed_name.replace('fc1' , 'linear_in' ) elif "fc2" in old_name: lowerCamelCase_ = trimmed_name.replace('fc2' , 'linear_out' ) lowerCamelCase_ = 'last_stage.' + trimmed_name elif "network" in old_name and re.search(r'.\d.' , lowercase ): lowerCamelCase_ = old_name.replace('network' , 'intermediate_stages' ) if "fc" in new_name: lowerCamelCase_ = new_name.replace('fc' , 'convolution' ) elif ("norm1" in new_name) and ("layernorm1" not in new_name): lowerCamelCase_ = new_name.replace('norm1' , 'batchnorm_before' ) elif ("norm2" in new_name) and ("layernorm2" not in new_name): lowerCamelCase_ = new_name.replace('norm2' , 'batchnorm_after' ) if "proj" in new_name: lowerCamelCase_ = new_name.replace('proj' , 'projection' ) if "dist_head" in new_name: lowerCamelCase_ = new_name.replace('dist_head' , 'distillation_classifier' ) elif "head" in new_name: lowerCamelCase_ = new_name.replace('head' , 'classifier' ) elif "patch_embed" in new_name: lowerCamelCase_ = 'efficientformer.' + new_name elif new_name == "norm.weight" or new_name == "norm.bias": lowerCamelCase_ = new_name.replace('norm' , 'layernorm' ) lowerCamelCase_ = 'efficientformer.' + new_name else: lowerCamelCase_ = 'efficientformer.encoder.' 
+ new_name return new_name def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int] , lowercase : List[Any] ): '''simple docstring''' for key in checkpoint.copy().keys(): lowerCamelCase_ = checkpoint.pop(lowercase ) lowerCamelCase_ = val return checkpoint def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowerCamelCase_ = Image.open(requests.get(lowercase , stream=lowercase ).raw ) return image def _SCREAMING_SNAKE_CASE ( lowercase : Path , lowercase : Path , lowercase : Path , lowercase : bool ): '''simple docstring''' lowerCamelCase_ = torch.load(lowercase , map_location='cpu' )['model'] lowerCamelCase_ = EfficientFormerConfig.from_json_file(lowercase ) lowerCamelCase_ = EfficientFormerForImageClassificationWithTeacher(lowercase ) lowerCamelCase_ = '_'.join(checkpoint_path.split('/' )[-1].split('.' )[0].split('_' )[:-1] ) lowerCamelCase_ = config.depths[-1] - config.num_metaad_blocks + 1 lowerCamelCase_ = convert_torch_checkpoint(lowercase , lowercase ) model.load_state_dict(lowercase ) model.eval() lowerCamelCase_ = { 'bilinear': PILImageResampling.BILINEAR, 'bicubic': PILImageResampling.BICUBIC, 'nearest': PILImageResampling.NEAREST, } # prepare image lowerCamelCase_ = prepare_img() lowerCamelCase_ = 2_56 lowerCamelCase_ = 2_24 lowerCamelCase_ = EfficientFormerImageProcessor( size={'shortest_edge': image_size} , crop_size={'height': crop_size, 'width': crop_size} , resample=pillow_resamplings['bicubic'] , ) lowerCamelCase_ = processor(images=lowercase , return_tensors='pt' ).pixel_values # original processing pipeline lowerCamelCase_ = Compose( [ Resize(lowercase , interpolation=pillow_resamplings['bicubic'] ), CenterCrop(lowercase ), ToTensor(), Normalize(lowercase , lowercase ), ] ) lowerCamelCase_ = image_transforms(lowercase ).unsqueeze(0 ) assert torch.allclose(lowercase , lowercase ) lowerCamelCase_ = model(lowercase ) lowerCamelCase_ = outputs.logits lowerCamelCase_ = (1, 10_00) if "l1" in model_name: lowerCamelCase_ = torch.Tensor( [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] ) assert torch.allclose(logits[0, :10] , lowercase , atol=1e-3 ) assert logits.shape == expected_shape elif "l3" in model_name: lowerCamelCase_ = torch.Tensor( [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] ) assert torch.allclose(logits[0, :10] , lowercase , atol=1e-3 ) assert logits.shape == expected_shape elif "l7" in model_name: lowerCamelCase_ = torch.Tensor( [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] ) assert logits.shape == expected_shape else: raise ValueError( f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" ) # Save Checkpoints Path(lowercase ).mkdir(exist_ok=lowercase ) model.save_pretrained(lowercase ) print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" ) processor.save_pretrained(lowercase ) print(f"""Processor successfuly saved at {pytorch_dump_path}""" ) if push_to_hub: print('Pushing model to the hub...' 
) model.push_to_hub( repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message='Add model' , use_temp_dir=lowercase , ) processor.push_to_hub( repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message='Add image processor' , use_temp_dir=lowercase , ) if __name__ == "__main__": lowerCamelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( "--pytorch_model_path", default=None, type=str, required=True, help="Path to EfficientFormer pytorch checkpoint.", ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The json file for EfficientFormer model config.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub") parser.add_argument( "--no-push_to_hub", dest="push_to_hub", action="store_false", help="Do not push model and image processor to the hub", ) parser.set_defaults(push_to_hub=True) lowerCamelCase : str = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
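# A minimal sketch of invoking the converter above from the shell; the script
# name and file paths are hypothetical placeholders, not files shipped with
# this script:
#
#   python convert_efficientformer_checkpoint.py \
#       --pytorch_model_path efficientformer_l1_300d.pth \
#       --config_file efficientformer_l1_config.json \
#       --pytorch_dump_path efficientformer-l1-300 \
#       --no-push_to_hub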
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """Harris corner detector.

        k: sensitivity constant; 0.04 and 0.06 are the commonly used values.
        window_size: side length of the square window of summed gradients.
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[np.ndarray, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the configured sensitivity rather than a hard-coded 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Threshold on the corner response; the cutoff can be tuned.
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img[y, x] = (0, 0, 255)  # mark detected corners

        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
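# A quick numeric sanity check of the corner response above, using
# hypothetical windowed sums wxx = 4.0, wyy = 4.0, wxy = 0.5 and k = 0.04:
#   det   = 4.0 * 4.0 - 0.5 ** 2     = 15.75
#   trace = 4.0 + 4.0                = 8.0
#   r     = 15.75 - 0.04 * 8.0 ** 2  = 13.19  -> r > 0.5, so the pixel is
#                                                marked as a corner.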
import os


def solution() -> int:
    """Sum the name scores in p022_names.txt (Project Euler problem 22).

    Each name's score is its alphabetical value ('A' -> 1, 'B' -> 2, ...)
    multiplied by its 1-based position in the sorted list.
    """
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score


if __name__ == "__main__":
    print(solution())
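# Worked example from the Project Euler problem statement: when the list is
# sorted, COLIN is the 938th name and is worth 3 + 15 + 12 + 9 + 14 = 53, so
# it contributes 938 * 53 = 49714 to the total score.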
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCamelCase : str = logging.get_logger(__name__) lowerCamelCase : Optional[Any] = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } lowerCamelCase : int = { "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } lowerCamelCase : Tuple = {"facebook/blenderbot-3B": 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = ( list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) lowerCamelCase_ = bs[:] lowerCamelCase_ = 0 for b in range(2**8 ): if b not in bs: bs.append(lowercase ) cs.append(2**8 + n ) n += 1 lowerCamelCase_ = [chr(lowercase ) for n in cs] return dict(zip(lowercase , lowercase ) ) def _SCREAMING_SNAKE_CASE ( lowercase : int ): '''simple docstring''' lowerCamelCase_ = set() lowerCamelCase_ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase_ = char return pairs class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = VOCAB_FILES_NAMES UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : Optional[Any] , A_ : List[Any] , A_ : List[Any] , A_ : Union[str, Any]="replace" , A_ : Dict="<s>" , A_ : Optional[int]="</s>" , A_ : Optional[Any]="</s>" , A_ : Dict="<s>" , A_ : Dict="<unk>" , A_ : Any="<pad>" , A_ : Dict="<mask>" , A_ : Union[str, Any]=False , **A_ : List[str] , ) -> Tuple: """simple docstring""" lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token super().__init__( errors=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , **A_ , ) with open(A_ , encoding='utf-8' ) as vocab_handle: lowerCamelCase_ = json.load(A_ ) lowerCamelCase_ = {v: k for k, v in self.encoder.items()} lowerCamelCase_ = errors # how to handle errors in decoding lowerCamelCase_ = bytes_to_unicode() lowerCamelCase_ = {v: k for k, v in self.byte_encoder.items()} with open(A_ , encoding='utf-8' ) as merges_handle: lowerCamelCase_ = merges_handle.read().split('\n' )[1:-1] lowerCamelCase_ = [tuple(merge.split() ) for merge in bpe_merges] lowerCamelCase_ = dict(zip(A_ , range(len(A_ ) ) ) ) lowerCamelCase_ = {} lowerCamelCase_ = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCamelCase_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def a__ ( self : Optional[Any] ) -> Dict: """simple docstring""" return len(self.encoder ) def a__ ( self : List[Any] ) -> Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def a__ ( self : Tuple , A_ : Tuple ) -> Optional[Any]: """simple docstring""" if token in self.cache: return self.cache[token] lowerCamelCase_ = tuple(A_ ) lowerCamelCase_ = get_pairs(A_ ) if not pairs: return token while True: lowerCamelCase_ = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase_ , lowerCamelCase_ = bigram lowerCamelCase_ = [] lowerCamelCase_ = 0 while i < len(A_ ): try: lowerCamelCase_ = word.index(A_ , A_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCamelCase_ = j if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCamelCase_ = tuple(A_ ) lowerCamelCase_ = new_word if len(A_ ) == 1: break else: lowerCamelCase_ = get_pairs(A_ ) lowerCamelCase_ = ' '.join(A_ ) lowerCamelCase_ = word return word def a__ ( self : str , A_ : List[str] ) -> List[str]: """simple docstring""" lowerCamelCase_ = [] for token in re.findall(self.pat , A_ ): lowerCamelCase_ = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A_ ).split(' ' ) ) return bpe_tokens def a__ ( self : Tuple , A_ : str ) -> Optional[Any]: """simple docstring""" return self.encoder.get(A_ , self.encoder.get(self.unk_token ) ) def a__ ( self : Tuple , A_ : Dict ) -> List[Any]: """simple docstring""" return self.decoder.get(A_ ) def a__ ( self : Optional[int] , A_ : List[Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = ''.join(A_ ) lowerCamelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def a__ ( self : Tuple , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(A_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCamelCase_ = os.path.join( A_ , 
(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) lowerCamelCase_ = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(A_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' ) lowerCamelCase_ = 0 with open(A_ , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A_ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ' Please check that the tokenizer is not corrupted!' ) lowerCamelCase_ = token_index writer.write(' '.join(A_ ) + '\n' ) index += 1 return vocab_file, merge_file def a__ ( self : str , A_ : List[int] , A_ : Optional[List[int]] = None , A_ : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ ) if token_ids_a is None: return [1] + ([0] * len(A_ )) + [1] return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1] def a__ ( self : int , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowerCamelCase_ = [self.sep_token_id] lowerCamelCase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a__ ( self : str , A_ : Optional[Any] , A_ : Union[str, Any]=False , **A_ : List[str] ) -> List[Any]: """simple docstring""" lowerCamelCase_ = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(A_ ) > 0 and not text[0].isspace()): lowerCamelCase_ = ' ' + text return (text, kwargs) def a__ ( self : List[Any] , A_ : List[int] , A_ : Optional[List[int]] = None ) -> Dict: """simple docstring""" return token_ids_a + [self.eos_token_id] def a__ ( self : Optional[int] , A_ : "Conversation" ) -> List[int]: """simple docstring""" lowerCamelCase_ = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(A_ ) lowerCamelCase_ = ' '.join(A_ ) lowerCamelCase_ = self.encode(A_ ) if len(A_ ) > self.model_max_length: lowerCamelCase_ = input_ids[-self.model_max_length :] logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
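# A minimal usage sketch; the class above is transformers' byte-level BPE
# Blenderbot tokenizer, so loading the published "facebook/blenderbot-3B"
# checkpoint should behave the same way (network access assumed):
#
#   from transformers import BlenderbotTokenizer
#
#   tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#   encoding = tokenizer("Hello world", return_tensors="pt")
#   print(encoding["input_ids"])                       # ids, ending in EOS
#   print(tokenizer.decode(encoding["input_ids"][0]))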
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
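# A short usage sketch for the config above: instantiating with no arguments
# is intended to reproduce the published 2.7b checkpoint's configuration, and
# any field can be overridden (the smaller sizes below are illustrative only):
#
#   config = GPTNeoXJapaneseConfig()                              # 2.7b defaults
#   small = GPTNeoXJapaneseConfig(hidden_size=512, num_hidden_layers=8)
#   assert small.vocab_size == 32000                              # untouched default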
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized infix expression with two stacks.

    Operands must be single digits; left parentheses are ignored (RULE 3).
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: push operands onto the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators onto the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: pop one operator and two operands, apply, push the result
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()  # right operand (pushed last)
            operand_stack.pop()
            num1 = operand_stack.peek()  # left operand
            operand_stack.pop()
            total = operators[opr](num1, num2)
            operand_stack.push(total)

    # RULE 5: the value left on the operand stack is the result
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
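# Step-by-step trace of the evaluation of "(5 + ((4 * 2) * (2 + 3)))":
#   digits and operators pile up on their stacks as they are read
#   ')' after "4 * 2"   -> pop '*', 2, 4  -> push 4 * 2 = 8
#   ')' after "2 + 3"   -> pop '+', 3, 2  -> push 2 + 3 = 5
#   ')' closing "8 * 5" -> pop '*', 5, 8  -> push 8 * 5 = 40
#   final ')'           -> pop '+', 40, 5 -> push 5 + 40 = 45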
import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class A( unittest.TestCase ): '''simple docstring''' def a__ ( self : Optional[Any] ) -> Any: """simple docstring""" lowerCamelCase_ = ['a', 'b', 'c'] # Defaults to last layer if both are None lowerCamelCase_ , lowerCamelCase_ = get_aligned_output_features_output_indices(A_ , A_ , A_ ) self.assertEqual(A_ , ['c'] ) self.assertEqual(A_ , [2] ) # Out indices set to match out features lowerCamelCase_ , lowerCamelCase_ = get_aligned_output_features_output_indices(['a', 'c'] , A_ , A_ ) self.assertEqual(A_ , ['a', 'c'] ) self.assertEqual(A_ , [0, 2] ) # Out features set to match out indices lowerCamelCase_ , lowerCamelCase_ = get_aligned_output_features_output_indices(A_ , [0, 2] , A_ ) self.assertEqual(A_ , ['a', 'c'] ) self.assertEqual(A_ , [0, 2] ) # Out features selected from negative indices lowerCamelCase_ , lowerCamelCase_ = get_aligned_output_features_output_indices(A_ , [-3, -1] , A_ ) self.assertEqual(A_ , ['a', 'c'] ) self.assertEqual(A_ , [-3, -1] ) def a__ ( self : Any ) -> List[Any]: """simple docstring""" with self.assertRaises(A_ ): verify_out_features_out_indices(['a', 'b'] , (0, 1) , A_ ) # Out features must be a list with self.assertRaises(A_ ): verify_out_features_out_indices(('a', 'b') , (0, 1) , ['a', 'b'] ) # Out features must be a subset of stage names with self.assertRaises(A_ ): verify_out_features_out_indices(['a', 'b'] , (0, 1) , ['a'] ) # Out indices must be a list or tuple with self.assertRaises(A_ ): verify_out_features_out_indices(A_ , 0 , ['a', 'b'] ) # Out indices must be a subset of stage names with self.assertRaises(A_ ): verify_out_features_out_indices(A_ , (0, 1) , ['a'] ) # Out features and out indices must be the same length with self.assertRaises(A_ ): verify_out_features_out_indices(['a', 'b'] , (0,) , ['a', 'b', 'c'] ) # Out features should match out indices with self.assertRaises(A_ ): verify_out_features_out_indices(['a', 'b'] , (0, 2) , ['a', 'b', 'c'] ) # Out features and out indices should be in order with self.assertRaises(A_ ): verify_out_features_out_indices(['b', 'a'] , (0, 1) , ['a', 'b'] ) # Check passes with valid inputs verify_out_features_out_indices(['a', 'b', 'd'] , (0, 1, -1) , ['a', 'b', 'c', 'd'] ) def a__ ( self : List[Any] ) -> int: """simple docstring""" lowerCamelCase_ = BackboneMixin() lowerCamelCase_ = ['a', 'b', 'c'] lowerCamelCase_ = ['a', 'c'] lowerCamelCase_ = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ['a', 'c'] ) self.assertEqual(backbone.out_indices , [0, 2] ) # Check out features and indices are updated correctly lowerCamelCase_ = ['a', 'b'] self.assertEqual(backbone.out_features , ['a', 'b'] ) self.assertEqual(backbone.out_indices , [0, 1] ) lowerCamelCase_ = [-3, -1] self.assertEqual(backbone.out_features , ['a', 'c'] ) self.assertEqual(backbone.out_indices , [-3, -1] )
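# The alignment rule exercised above, in brief, for stage names ["a", "b", "c"]:
#   out_features=None,       out_indices=None     -> (["c"], [2])          last stage only
#   out_features=["a", "c"], out_indices=None     -> (["a", "c"], [0, 2])
#   out_features=None,       out_indices=[-3, -1] -> (["a", "c"], [-3, -1])
# Whichever side is given pins down the other; both must stay in stage order
# and match each other, or verification raises.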
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedy activity selection: print a maximum-size set of mutually
    compatible activities, assuming they are sorted by finish time."""
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider the rest of the activities
    for j in range(n):
        # If this activity has a start time greater than or equal to the
        # finish time of the previously selected activity, select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
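# Trace for start = [1, 3, 0, 5, 8, 5] and finish = [2, 4, 6, 7, 9, 9]:
#   activity 0 is always taken (finishes at 2)
#   j = 1: start 3 >= 2 -> select (now finishes at 4)
#   j = 2: start 0 <  4 -> skip
#   j = 3: start 5 >= 4 -> select (now finishes at 7)
#   j = 4: start 8 >= 7 -> select (now finishes at 9)
#   j = 5: start 5 <  9 -> skip
# Printed selection: 0,1,3,4,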
import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class A( UpperCamelCase ): '''simple docstring''' def __init__( self : int , A_ : Any , A_ : Any=13 , A_ : Union[str, Any]=7 , A_ : Dict=True , A_ : List[Any]=True , A_ : Dict=True , A_ : Optional[int]=True , A_ : Any=99 , A_ : Tuple=32 , A_ : List[str]=5 , A_ : List[str]=4 , A_ : Dict=37 , A_ : List[str]="gelu" , A_ : Any=0.1 , A_ : str=0.1 , A_ : List[str]=512 , A_ : Optional[int]=16 , A_ : Dict=2 , A_ : Tuple=0.02 , A_ : Tuple=False , A_ : Optional[int]=True , A_ : Union[str, Any]="None" , A_ : int=3 , A_ : int=4 , A_ : Any=None , ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = seq_length lowerCamelCase_ = is_training lowerCamelCase_ = use_input_mask lowerCamelCase_ = use_token_type_ids lowerCamelCase_ = use_labels lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = type_vocab_size lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = num_labels lowerCamelCase_ = num_choices lowerCamelCase_ = relative_attention lowerCamelCase_ = position_biased_input lowerCamelCase_ = pos_att_type lowerCamelCase_ = scope def a__ ( self : List[Any] ) -> Tuple: """simple docstring""" lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase_ = None if self.use_input_mask: lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) lowerCamelCase_ = None if self.use_token_type_ids: lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase_ = None lowerCamelCase_ = None lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , 
position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def a__ ( self : str ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = self.get_config() lowerCamelCase_ = 300 return config def a__ ( self : Any , A_ : Optional[int] ) -> List[Any]: """simple docstring""" self.parent.assertListEqual(list(result.loss.size() ) , [] ) def a__ ( self : int , A_ : Any , A_ : List[Any] , A_ : Union[str, Any] , A_ : Any , A_ : str , A_ : List[Any] , A_ : Optional[int] ) -> str: """simple docstring""" lowerCamelCase_ = DebertaModel(config=A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ , attention_mask=A_ , token_type_ids=A_ )[0] lowerCamelCase_ = model(A_ , token_type_ids=A_ )[0] lowerCamelCase_ = model(A_ )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def a__ ( self : Dict , A_ : List[Any] , A_ : Optional[int] , A_ : int , A_ : Any , A_ : int , A_ : Any , A_ : int ) -> Dict: """simple docstring""" lowerCamelCase_ = DebertaForMaskedLM(config=A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self : Optional[int] , A_ : List[str] , A_ : Tuple , A_ : str , A_ : List[Any] , A_ : List[Any] , A_ : List[Any] , A_ : Union[str, Any] ) -> Any: """simple docstring""" lowerCamelCase_ = self.num_labels lowerCamelCase_ = DebertaForSequenceClassification(A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(A_ ) def a__ ( self : Tuple , A_ : Optional[Any] , A_ : Optional[int] , A_ : Union[str, Any] , A_ : Optional[Any] , A_ : Optional[Any] , A_ : str , A_ : Union[str, Any] ) -> Tuple: """simple docstring""" lowerCamelCase_ = self.num_labels lowerCamelCase_ = DebertaForTokenClassification(config=A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a__ ( self : str , A_ : str , A_ : Tuple , A_ : Union[str, Any] , A_ : Optional[int] , A_ : Union[str, Any] , A_ : int , A_ : List[Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = DebertaForQuestionAnswering(config=A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model( A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ) = config_and_inputs lowerCamelCase_ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() 
else () ) UpperCamelCase = ( { '''feature-extraction''': DebertaModel, '''fill-mask''': DebertaForMaskedLM, '''question-answering''': DebertaForQuestionAnswering, '''text-classification''': DebertaForSequenceClassification, '''token-classification''': DebertaForTokenClassification, '''zero-shot''': DebertaForSequenceClassification, } if is_torch_available() else {} ) UpperCamelCase = True UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def a__ ( self : str ) -> List[str]: """simple docstring""" lowerCamelCase_ = DebertaModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=A_ , hidden_size=37 ) def a__ ( self : Tuple ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def a__ ( self : Union[str, Any] ) -> int: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*A_ ) def a__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*A_ ) def a__ ( self : int ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*A_ ) def a__ ( self : List[Any] ) -> List[str]: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*A_ ) def a__ ( self : List[Any] ) -> List[Any]: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*A_ ) @slow def a__ ( self : Dict ) -> Tuple: """simple docstring""" for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = DebertaModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) @require_torch @require_sentencepiece @require_tokenizers class A( unittest.TestCase ): '''simple docstring''' @unittest.skip(reason='Model not available yet' ) def a__ ( self : Union[str, Any] ) -> int: """simple docstring""" pass @slow def a__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = DebertaModel.from_pretrained('microsoft/deberta-base' ) lowerCamelCase_ = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) lowerCamelCase_ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowerCamelCase_ = model(A_ , attention_mask=A_ )[0] # compare the actual values for a slice. lowerCamelCase_ = torch.tensor( [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A_ , atol=1E-4 ) , f"""{output[:, 1:4, 1:4]}""" )
import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A: '''simple docstring''' def __init__( self : Optional[Any] , A_ : Union[str, Any] , A_ : str=13 , A_ : List[Any]=32 , A_ : Tuple=2 , A_ : Dict=3 , A_ : Union[str, Any]=16 , A_ : List[str]=[32, 64, 128] , A_ : Optional[Any]=[1, 2, 1] , A_ : Tuple=[2, 2, 4] , A_ : Dict=2 , A_ : Optional[Any]=2.0 , A_ : List[str]=True , A_ : Dict=0.0 , A_ : List[str]=0.0 , A_ : Optional[int]=0.1 , A_ : str="gelu" , A_ : Optional[Any]=False , A_ : Any=True , A_ : Optional[Any]=0.02 , A_ : Dict=1E-5 , A_ : int=True , A_ : Optional[int]=None , A_ : List[str]=True , A_ : Tuple=10 , A_ : Any=8 , A_ : Dict=["stage1", "stage2"] , A_ : Optional[Any]=[1, 2] , ) -> List[str]: """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = image_size lowerCamelCase_ = patch_size lowerCamelCase_ = num_channels lowerCamelCase_ = embed_dim lowerCamelCase_ = hidden_sizes lowerCamelCase_ = depths lowerCamelCase_ = num_heads lowerCamelCase_ = window_size lowerCamelCase_ = mlp_ratio lowerCamelCase_ = qkv_bias lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = drop_path_rate lowerCamelCase_ = hidden_act lowerCamelCase_ = use_absolute_embeddings lowerCamelCase_ = patch_norm lowerCamelCase_ = layer_norm_eps lowerCamelCase_ = initializer_range lowerCamelCase_ = is_training lowerCamelCase_ = scope lowerCamelCase_ = use_labels lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = encoder_stride lowerCamelCase_ = out_features lowerCamelCase_ = out_indices def a__ ( self : List[str] ) -> List[Any]: """simple docstring""" lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = self.get_config() return config, pixel_values, labels def a__ ( self : List[Any] ) -> Any: """simple docstring""" return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , 
out_features=self.out_features , out_indices=self.out_indices , ) def a__ ( self : Union[str, Any] , A_ : Dict , A_ : int , A_ : Optional[int] ) -> List[str]: """simple docstring""" lowerCamelCase_ = FocalNetModel(config=A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ ) lowerCamelCase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCamelCase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def a__ ( self : Tuple , A_ : List[str] , A_ : Optional[int] , A_ : Optional[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ = FocalNetBackbone(config=A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None lowerCamelCase_ = None lowerCamelCase_ = FocalNetBackbone(config=A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def a__ ( self : int , A_ : Optional[Any] , A_ : Optional[int] , A_ : Any ) -> Any: """simple docstring""" lowerCamelCase_ = FocalNetForMaskedImageModeling(config=A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCamelCase_ = 1 lowerCamelCase_ = FocalNetForMaskedImageModeling(A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase_ = model(A_ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def a__ ( self : Tuple , A_ : List[Any] , A_ : int , A_ : Dict ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = self.type_sequence_label_size lowerCamelCase_ = FocalNetForImageClassification(A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase_ = 1 lowerCamelCase_ = FocalNetForImageClassification(A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase_ = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def a__ ( self : int ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs lowerCamelCase_ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, 
FocalNetBackbone, ) if is_torch_available() else () ) UpperCamelCase = ( {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification} if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def a__ ( self : List[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ = FocalNetModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=A_ , embed_dim=37 , has_text_modality=A_ ) def a__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a__ ( self : Any ) -> Optional[int]: """simple docstring""" return def a__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def a__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*A_ ) def a__ ( self : Dict ) -> int: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*A_ ) def a__ ( self : List[str] ) -> Any: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A_ ) @unittest.skip(reason='FocalNet does not use inputs_embeds' ) def a__ ( self : int ) -> int: """simple docstring""" pass @unittest.skip(reason='FocalNet does not use feedforward chunking' ) def a__ ( self : Tuple ) -> List[str]: """simple docstring""" pass def a__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowerCamelCase_ = model_class(A_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A_ , nn.Linear ) ) def a__ ( self : Any ) -> Optional[int]: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowerCamelCase_ = model_class(A_ ) lowerCamelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ = [*signature.parameters.keys()] lowerCamelCase_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , A_ ) def a__ ( self : int , A_ : List[Any] , A_ : int , A_ : Dict , A_ : Dict ) -> List[Any]: """simple docstring""" lowerCamelCase_ = model_class(A_ ) model.to(A_ ) model.eval() with torch.no_grad(): lowerCamelCase_ = model(**self._prepare_for_class(A_ , A_ ) ) lowerCamelCase_ = outputs.hidden_states lowerCamelCase_ = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(A_ ) , A_ ) # FocalNet has a different seq_length lowerCamelCase_ = ( config.patch_size if isinstance(config.patch_size , 
collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) lowerCamelCase_ = outputs.reshaped_hidden_states self.assertEqual(len(A_ ) , A_ ) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = reshaped_hidden_states[0].shape lowerCamelCase_ = ( reshaped_hidden_states[0].view(A_ , A_ , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def a__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: lowerCamelCase_ = True self.check_hidden_states_output(A_ , A_ , A_ , A_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ = True self.check_hidden_states_output(A_ , A_ , A_ , A_ ) def a__ ( self : List[str] ) -> Dict: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = 3 lowerCamelCase_ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCamelCase_ = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCamelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: lowerCamelCase_ = True self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ = True self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) ) @slow def a__ ( self : str ) -> Optional[Any]: """simple docstring""" for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = FocalNetModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def a__ ( self : List[Any] ) -> Tuple: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = _config_zero_init(A_ ) for model_class in self.all_model_classes: lowerCamelCase_ = model_class(config=A_ ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @require_vision @require_torch class A( unittest.TestCase ): '''simple docstring''' @cached_property def a__ ( self : List[str] ) -> Optional[int]: """simple docstring""" return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None @slow def a__ ( self : Tuple ) -> Any: """simple docstring""" lowerCamelCase_ = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(A_ ) lowerCamelCase_ = 
self.default_image_processor lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) lowerCamelCase_ = image_processor(images=A_ , return_tensors='pt' ).to(A_ ) # forward pass with torch.no_grad(): lowerCamelCase_ = model(**A_ ) # verify the logits lowerCamelCase_ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , A_ ) lowerCamelCase_ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(A_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 ) @require_torch class A( UpperCamelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase = (FocalNetBackbone,) if is_torch_available() else () UpperCamelCase = FocalNetConfig UpperCamelCase = False def a__ ( self : List[str] ) -> Tuple: """simple docstring""" lowerCamelCase_ = FocalNetModelTester(self )
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
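# With the lazy module installed in sys.modules, importing the package itself
# stays cheap; the tokenizer module is only loaded on first attribute access.
# A usage sketch, assuming the usual transformers package layout:
#
#   from transformers.models.wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer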
import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class A( unittest.TestCase ): '''simple docstring''' UpperCamelCase = MODEL_FOR_CAUSAL_LM_MAPPING UpperCamelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' ) # Using `do_sample=False` to force deterministic output lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ ) self.assertEqual( A_ , [ { 'generated_text': ( 'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.' ' oscope. FiliFili@@' ) } ] , ) lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] ) self.assertEqual( A_ , [ [ { 'generated_text': ( 'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.' ' oscope. FiliFili@@' ) } ], [ { 'generated_text': ( 'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy' ' oscope. oscope. FiliFili@@' ) } ], ] , ) lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ , num_return_sequences=2 , return_tensors=A_ ) self.assertEqual( A_ , [ {'generated_token_ids': ANY(A_ )}, {'generated_token_ids': ANY(A_ )}, ] , ) lowerCamelCase_ = text_generator.model.config.eos_token_id lowerCamelCase_ = '<pad>' lowerCamelCase_ = text_generator( ['This is a test', 'This is a second test'] , do_sample=A_ , num_return_sequences=2 , batch_size=2 , return_tensors=A_ , ) self.assertEqual( A_ , [ [ {'generated_token_ids': ANY(A_ )}, {'generated_token_ids': ANY(A_ )}, ], [ {'generated_token_ids': ANY(A_ )}, {'generated_token_ids': ANY(A_ )}, ], ] , ) @require_tf def a__ ( self : Optional[int] ) -> str: """simple docstring""" lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' ) # Using `do_sample=False` to force deterministic output lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ ) self.assertEqual( A_ , [ { 'generated_text': ( 'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵' ' please,' ) } ] , ) lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] , do_sample=A_ ) self.assertEqual( A_ , [ [ { 'generated_text': ( 'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵' ' please,' ) } ], [ { 'generated_text': ( 'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes' ' Cannes 閲閲Cannes Cannes Cannes 攵 please,' ) } ], ] , ) def a__ ( self : Optional[int] , A_ : Dict , A_ : int , A_ : List[str] ) -> str: """simple docstring""" lowerCamelCase_ = TextGenerationPipeline(model=A_ , tokenizer=A_ ) return text_generator, ["This is a test", "Another test"] def a__ ( self : Dict ) -> str: """simple docstring""" lowerCamelCase_ = 'Hello I believe in' lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' ) lowerCamelCase_ = text_generator(A_ ) self.assertEqual( A_ , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , ) lowerCamelCase_ = text_generator(A_ , stop_sequence=' fe' ) 
self.assertEqual(A_ , [{'generated_text': 'Hello I believe in fe'}] ) def a__ ( self : Any , A_ : Optional[Any] , A_ : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = text_generator.model lowerCamelCase_ = text_generator.tokenizer lowerCamelCase_ = text_generator('This is a test' ) self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] ) self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) ) lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ ) self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] ) self.assertNotIn('This is a test' , outputs[0]['generated_text'] ) lowerCamelCase_ = pipeline(task='text-generation' , model=A_ , tokenizer=A_ , return_full_text=A_ ) lowerCamelCase_ = text_generator('This is a test' ) self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] ) self.assertNotIn('This is a test' , outputs[0]['generated_text'] ) lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ ) self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] ) self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) ) lowerCamelCase_ = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=A_ ) self.assertEqual( A_ , [ [{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}], [{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}], ] , ) if text_generator.tokenizer.pad_token is not None: lowerCamelCase_ = text_generator( ['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=A_ ) self.assertEqual( A_ , [ [{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}], [{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}], ] , ) with self.assertRaises(A_ ): lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_text=A_ ) with self.assertRaises(A_ ): lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_tensors=A_ ) with self.assertRaises(A_ ): lowerCamelCase_ = text_generator('test' , return_text=A_ , return_tensors=A_ ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): lowerCamelCase_ = text_generator('' ) self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] ) else: with self.assertRaises((ValueError, AssertionError) ): lowerCamelCase_ = text_generator('' ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. 
lowerCamelCase_ = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM'] if ( tokenizer.model_max_length < 10000 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator('This is a test' * 500 , max_new_tokens=20 ) lowerCamelCase_ = text_generator('This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(A_ ): text_generator( 'This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def a__ ( self : Union[str, Any] ) -> Any: """simple docstring""" import torch # Classic `model_kwargs` lowerCamelCase_ = pipeline( model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCamelCase_ = pipe('This is a test' ) self.assertEqual( A_ , [ { 'generated_text': ( 'This is a test test test test test test test test test test test test test test test test' ' test' ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCamelCase_ = pipe('This is a test' ) self.assertEqual( A_ , [ { 'generated_text': ( 'This is a test test test test test test test test test test test test test test test test' ' test' ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) lowerCamelCase_ = pipe('This is a test' ) self.assertEqual( A_ , [ { 'generated_text': ( 'This is a test test test test test test test test test test test test test test test test' ' test' ) } ] , ) @require_torch @require_torch_gpu def a__ ( self : int ) -> str: """simple docstring""" import torch lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.floataa ) pipe('This is a test' ) @require_torch @require_accelerate @require_torch_gpu def a__ ( self : List[Any] ) -> Dict: """simple docstring""" import torch lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.floataa ) pipe('This is a test' , do_sample=A_ , top_p=0.5 ) def a__ ( self : Tuple ) -> Dict: """simple docstring""" lowerCamelCase_ = 'Hello world' lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' ) if text_generator.model.framework == "tf": lowerCamelCase_ = logging.get_logger('transformers.generation.tf_utils' ) else: lowerCamelCase_ = logging.get_logger('transformers.generation.utils' ) lowerCamelCase_ = 'Both `max_new_tokens`' # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(A_ ) as cl: lowerCamelCase_ = text_generator(A_ , 
max_length=10 , max_new_tokens=1 ) self.assertIn(A_ , cl.out ) # The user only sets one -> no warning with CaptureLogger(A_ ) as cl: lowerCamelCase_ = text_generator(A_ , max_new_tokens=1 ) self.assertNotIn(A_ , cl.out ) with CaptureLogger(A_ ) as cl: lowerCamelCase_ = text_generator(A_ , max_length=10 ) self.assertNotIn(A_ , cl.out )
import os import zipfile import pytest from datasets.utils.extract import ( BzipaExtractor, Extractor, GzipExtractor, LzaExtractor, SevenZipExtractor, TarExtractor, XzExtractor, ZipExtractor, ZstdExtractor, ) from .utils import require_lza, require_pyazr, require_zstandard @pytest.mark.parametrize( 'compression_format, is_archive' , [ ('7z', True), ('bz2', False), ('gzip', False), ('lz4', False), ('tar', True), ('xz', False), ('zip', True), ('zstd', False), ] , ) def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] , lowercase : Union[str, Any] , lowercase : List[str] , lowercase : List[str] , lowercase : List[Any] , lowercase : List[Any] , lowercase : Union[str, Any] , lowercase : Union[str, Any] , lowercase : List[Any] , lowercase : Optional[int] , lowercase : Any , lowercase : Tuple , ): '''simple docstring''' lowerCamelCase_ = { '7z': (seven_zip_file, SevenZipExtractor), 'bz2': (bza_file, BzipaExtractor), 'gzip': (gz_file, GzipExtractor), 'lz4': (lza_file, LzaExtractor), 'tar': (tar_file, TarExtractor), 'xz': (xz_file, XzExtractor), 'zip': (zip_file, ZipExtractor), 'zstd': (zstd_file, ZstdExtractor), } lowerCamelCase_ , lowerCamelCase_ = input_paths_and_base_extractors[compression_format] if input_path is None: lowerCamelCase_ = f"""for '{compression_format}' compression_format, """ if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(lowercase ) assert base_extractor.is_extractable(lowercase ) lowerCamelCase_ = tmp_path / ('extracted' if is_archive else 'extracted.txt') base_extractor.extract(lowercase , lowercase ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name lowerCamelCase_ = file_path.read_text(encoding='utf-8' ) else: lowerCamelCase_ = output_path.read_text(encoding='utf-8' ) lowerCamelCase_ = text_file.read_text(encoding='utf-8' ) assert extracted_file_content == expected_file_content @pytest.mark.parametrize( 'compression_format, is_archive' , [ ('7z', True), ('bz2', False), ('gzip', False), ('lz4', False), ('tar', True), ('xz', False), ('zip', True), ('zstd', False), ] , ) def _SCREAMING_SNAKE_CASE ( lowercase : Dict , lowercase : Union[str, Any] , lowercase : List[Any] , lowercase : str , lowercase : Any , lowercase : Dict , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : Dict , lowercase : Tuple , lowercase : Dict , lowercase : Tuple , ): '''simple docstring''' lowerCamelCase_ = { '7z': seven_zip_file, 'bz2': bza_file, 'gzip': gz_file, 'lz4': lza_file, 'tar': tar_file, 'xz': xz_file, 'zip': zip_file, 'zstd': zstd_file, } lowerCamelCase_ = input_paths[compression_format] if input_path is None: lowerCamelCase_ = f"""for '{compression_format}' compression_format, """ if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(lowercase ) lowerCamelCase_ = Extractor.infer_extractor_format(lowercase ) assert extractor_format is not None lowerCamelCase_ = tmp_path / ('extracted' if is_archive else 'extracted.txt') Extractor.extract(lowercase , lowercase , lowercase ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name lowerCamelCase_ = 
file_path.read_text(encoding='utf-8' ) else: lowerCamelCase_ = output_path.read_text(encoding='utf-8' ) lowerCamelCase_ = text_file.read_text(encoding='utf-8' ) assert extracted_file_content == expected_file_content @pytest.fixture def _SCREAMING_SNAKE_CASE ( lowercase : Any , lowercase : str ): '''simple docstring''' import tarfile lowerCamelCase_ = tmp_path / 'data_dot_dot' directory.mkdir() lowerCamelCase_ = directory / 'tar_file_with_dot_dot.tar' with tarfile.TarFile(lowercase , 'w' ) as f: f.add(lowercase , arcname=os.path.join('..' , text_file.name ) ) return path @pytest.fixture def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] ): '''simple docstring''' import tarfile lowerCamelCase_ = tmp_path / 'data_sym_link' directory.mkdir() lowerCamelCase_ = directory / 'tar_file_with_sym_link.tar' os.symlink('..' , directory / 'subdir' , target_is_directory=lowercase ) with tarfile.TarFile(lowercase , 'w' ) as f: f.add(str(directory / 'subdir' ) , arcname='subdir' ) # str required by os.readlink on Windows and Python < 3.8 return path @pytest.mark.parametrize( 'insecure_tar_file, error_log' , [('tar_file_with_dot_dot', 'illegal path'), ('tar_file_with_sym_link', 'Symlink')] , ) def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Optional[int] , lowercase : Tuple , lowercase : Optional[int] , lowercase : List[Any] , lowercase : str ): '''simple docstring''' lowerCamelCase_ = { 'tar_file_with_dot_dot': tar_file_with_dot_dot, 'tar_file_with_sym_link': tar_file_with_sym_link, } lowerCamelCase_ = insecure_tar_files[insecure_tar_file] lowerCamelCase_ = tmp_path / 'extracted' TarExtractor.extract(lowercase , lowercase ) assert caplog.text for record in caplog.records: assert record.levelname == "ERROR" assert error_log in record.msg def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] ): '''simple docstring''' lowerCamelCase_ = tmpdir / 'not_a_zip_file' # From: https://github.com/python/cpython/pull/5053 lowerCamelCase_ = ( b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00' b'\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I' b'DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07' b'\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82' ) with not_a_zip_file.open('wb' ) as f: f.write(lowercase ) assert zipfile.is_zipfile(str(lowercase ) ) # is a false positive for `zipfile` assert not ZipExtractor.is_extractable(lowercase ) # but we're right
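# A standalone usage sketch for the extractor API tested above, using gzip so
# it needs no optional dependencies. File names are illustrative; the
# `infer_extractor_format` / `extract` calls are the same ones the tests use.
import gzip
from pathlib import Path

from datasets.utils.extract import Extractor

src = Path("example.txt")
src.write_text("hello world", encoding="utf-8")
archive = Path("example.txt.gz")
with gzip.open(archive, "wb") as f:
    f.write(src.read_bytes())

fmt = Extractor.infer_extractor_format(str(archive))  # -> "gzip"
Extractor.extract(str(archive), "example_extracted.txt", fmt)
print(Path("example_extracted.txt").read_text(encoding="utf-8"))  # hello world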
651
import os import re import shutil import sys import tempfile import unittest import black lowerCamelCase : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. lowerCamelCase : Tuple = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n" class A( unittest.TestCase ): '''simple docstring''' def a__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" lowerCamelCase_ = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) ) lowerCamelCase_ = self.diffusers_dir shutil.copy( os.path.join(A_ , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , ) def a__ ( self : Tuple ) -> List[str]: """simple docstring""" lowerCamelCase_ = 'src/diffusers' shutil.rmtree(self.diffusers_dir ) def a__ ( self : str , A_ : Optional[Any] , A_ : Optional[int] , A_ : str , A_ : Optional[Any]=None ) -> int: """simple docstring""" lowerCamelCase_ = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: lowerCamelCase_ = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result lowerCamelCase_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) lowerCamelCase_ = black.format_str(A_ , mode=A_ ) lowerCamelCase_ = os.path.join(self.diffusers_dir , 'new_code.py' ) with open(A_ , 'w' , newline='\n' ) as f: f.write(A_ ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(A_ ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=A_ ) with open(A_ , 'r' ) as f: self.assertTrue(f.read() , A_ ) def a__ ( self : Optional[int] ) -> Dict: """simple docstring""" lowerCamelCase_ = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' ) self.assertEqual(A_ , A_ ) def a__ ( self : Any ) -> Dict: """simple docstring""" self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , ) # With no empty line at the end self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , A_ , ) # Copy consistency with rename self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , A_ ) , ) # Copy consistency with a really long name lowerCamelCase_ = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason' self.check_copy_consistency( f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with 
DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('Bert' , A_ , A_ ) , ) # Copy consistency with overwrite self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , A_ , overwrite_result=re.sub('DDPM' , 'Test' , A_ ) , )
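# A quick sketch of the `black` calls the helper above relies on to normalize
# code before comparing it against the reference: `black.Mode` configures the
# target style and `black.format_str` reformats a source string in memory.
import black

source = "x=1\ny =  2\n"
print(black.format_str(source, mode=black.Mode(line_length=119)))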
651
1
import inspect import unittest from transformers import RegNetConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import RegNetForImageClassification, RegNetModel from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A: '''simple docstring''' def __init__( self : Any , A_ : Tuple , A_ : List[str]=3 , A_ : List[str]=32 , A_ : Optional[Any]=3 , A_ : Optional[Any]=10 , A_ : Dict=[10, 20, 30, 40] , A_ : Tuple=[1, 1, 2, 1] , A_ : List[Any]=True , A_ : Any=True , A_ : Optional[Any]="relu" , A_ : List[str]=3 , A_ : int=None , ) -> Tuple: """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = image_size lowerCamelCase_ = num_channels lowerCamelCase_ = embeddings_size lowerCamelCase_ = hidden_sizes lowerCamelCase_ = depths lowerCamelCase_ = is_training lowerCamelCase_ = use_labels lowerCamelCase_ = hidden_act lowerCamelCase_ = num_labels lowerCamelCase_ = scope lowerCamelCase_ = len(A_ ) def a__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase_ = self.get_config() return config, pixel_values, labels def a__ ( self : Dict ) -> Dict: """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def a__ ( self : List[Any] , A_ : Optional[Any] , A_ : str , A_ : List[str] ) -> List[str]: """simple docstring""" lowerCamelCase_ = RegNetModel(config=A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def a__ ( self : str , A_ : Dict , A_ : List[Any] , A_ : Dict ) -> Any: """simple docstring""" lowerCamelCase_ = self.num_labels lowerCamelCase_ = RegNetForImageClassification(A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs lowerCamelCase_ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase = (RegNetModel, RegNetForImageClassification) if is_torch_available() else () UpperCamelCase = ( {'''feature-extraction''': RegNetModel, '''image-classification''': RegNetForImageClassification} if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False 
def a__ ( self : List[str] ) -> Any: """simple docstring""" lowerCamelCase_ = RegNetModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=A_ , has_text_modality=A_ ) def a__ ( self : str ) -> Optional[Any]: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a__ ( self : Tuple ) -> Dict: """simple docstring""" return @unittest.skip(reason='RegNet does not use inputs_embeds' ) def a__ ( self : str ) -> Any: """simple docstring""" pass @unittest.skip(reason='RegNet does not support input and output embeddings' ) def a__ ( self : Any ) -> Tuple: """simple docstring""" pass def a__ ( self : Any ) -> Optional[int]: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ = model_class(A_ ) lowerCamelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ = [*signature.parameters.keys()] lowerCamelCase_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , A_ ) def a__ ( self : str ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def a__ ( self : int ) -> int: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ = model_class(config=A_ ) for name, module in model.named_modules(): if isinstance(A_ , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) def a__ ( self : int ) -> List[Any]: """simple docstring""" def check_hidden_states_output(A_ : Any , A_ : Tuple , A_ : List[str] ): lowerCamelCase_ = model_class(A_ ) model.to(A_ ) model.eval() with torch.no_grad(): lowerCamelCase_ = model(**self._prepare_for_class(A_ , A_ ) ) lowerCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase_ = self.model_tester.num_stages self.assertEqual(len(A_ ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: lowerCamelCase_ = layer_type lowerCamelCase_ = True check_hidden_states_output(A_ , A_ , A_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ = True check_hidden_states_output(A_ , A_ , A_ ) def a__ ( self : int ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_image_classification(*A_ ) @slow def a__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = RegNetModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class A( unittest.TestCase ): '''simple docstring''' @cached_property def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" return ( AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(A_ ) lowerCamelCase_ = self.default_image_processor lowerCamelCase_ = prepare_img() lowerCamelCase_ = image_processor(images=A_ , return_tensors='pt' ).to(A_ ) # forward pass with torch.no_grad(): lowerCamelCase_ = model(**A_ ) # verify the logits lowerCamelCase_ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , A_ ) lowerCamelCase_ = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(A_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
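# A hedged end-to-end sketch of the model family under test: single-image
# classification with a pretrained RegNet. The checkpoint name is an
# assumption (a public hub checkpoint), while the image path is the COCO
# fixture the integration test above reads.
import torch
from PIL import Image
from transformers import AutoImageProcessor, RegNetForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])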
651
import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class A( unittest.TestCase ): '''simple docstring''' def a__ ( self : Optional[int] , A_ : Tuple , A_ : str , A_ : int ) -> Any: """simple docstring""" self.assertEqual(len(A_ ) , len(A_ ) ) for a, b in zip(A_ , A_ ): self.assertAlmostEqual(A_ , A_ , delta=A_ ) def a__ ( self : int ) -> str: """simple docstring""" lowerCamelCase_ = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(A_ ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 ) def a__ ( self : List[Any] ) -> List[Any]: """simple docstring""" lowerCamelCase_ = None ops.enable_eager_execution_internal() lowerCamelCase_ = tf.config.list_physical_devices('CPU' ) if len(A_ ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) lowerCamelCase_ = tf.config.list_logical_devices(device_type='CPU' ) lowerCamelCase_ = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): lowerCamelCase_ = GradientAccumulator() lowerCamelCase_ = tf.Variable([4.0, 3.0] ) lowerCamelCase_ , lowerCamelCase_ = create_optimizer(5E-5 , 10 , 5 ) lowerCamelCase_ = tf.Variable([0.0, 0.0] , trainable=A_ ) def accumulate_on_replica(A_ : Any ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(A_ : List[Any] , A_ : Tuple ): with strategy.scope(): lowerCamelCase_ = strategy.experimental_local_results(A_ ) local_variables[0].assign(A_ ) local_variables[1].assign(A_ ) strategy.run(A_ , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(A_ ) def _check_local_values(A_ : List[Any] , A_ : str ): lowerCamelCase_ = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , A_ , tol=1E-2 ) self.assertListAlmostEqual(values[1].value() , A_ , tol=1E-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
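# The accumulation pattern the tests above verify, written as a tiny training
# sketch: gradients are summed over several micro-batches, applied once, then
# reset. Uses the same `GradientAccumulator` / `create_optimizer` API as the
# tests; the gradient values are arbitrary.
import tensorflow as tf
from transformers import GradientAccumulator, create_optimizer

accumulator = GradientAccumulator()
optimizer, _ = create_optimizer(5e-5, num_train_steps=10, num_warmup_steps=5)
variable = tf.Variable([4.0, 3.0])

for grad in ([1.0, 2.0], [3.0, -1.0], [-2.0, 2.0]):  # micro-batch gradients
    accumulator([tf.constant(grad)])

optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))
accumulator.reset()
print(variable.numpy())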
651
1
# tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. lowerCamelCase : Dict = abspath(join(dirname(__file__), "src")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="ignore", category=FutureWarning) def _SCREAMING_SNAKE_CASE ( lowercase : List[str] ): '''simple docstring''' config.addinivalue_line( 'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' ) config.addinivalue_line( 'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' ) config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' ) config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' ) config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' ) config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' ) def _SCREAMING_SNAKE_CASE ( lowercase : int ): '''simple docstring''' from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(lowercase ) def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int] ): '''simple docstring''' from transformers.testing_utils import pytest_terminal_summary_main lowerCamelCase_ = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(lowercase , id=lowercase ) def _SCREAMING_SNAKE_CASE ( lowercase : Any , lowercase : Union[str, Any] ): '''simple docstring''' if exitstatus == 5: lowerCamelCase_ = 0 # Doctest custom flag to ignore output. lowerCamelCase : Union[str, Any] = doctest.register_optionflag("IGNORE_RESULT") lowerCamelCase : Dict = doctest.OutputChecker class A( UpperCamelCase ): '''simple docstring''' def a__ ( self : Tuple , A_ : Union[str, Any] , A_ : str , A_ : Union[str, Any] ) -> List[str]: """simple docstring""" if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self , A_ , A_ , A_ ) lowerCamelCase : List[Any] = CustomOutputChecker lowerCamelCase : Optional[int] = HfDoctestModule lowerCamelCase : int = HfDocTestParser
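# Illustrative companion (not part of conftest.py): once a marker such as
# `is_staging_test` is registered above via `config.addinivalue_line`, tests
# can be tagged with it and selected with `pytest -m is_staging_test`.
import pytest

@pytest.mark.is_staging_test
def test_only_runs_when_staging_is_selected():
    assert True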
651
import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs lowerCamelCase : str = imread(r"digital_image_processing/image_data/lena_small.jpg") lowerCamelCase : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY) def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = cn.convert_to_negative(lowercase ) # assert negative_img array for at least one True assert negative_img.any() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img: # Work around assertion for response assert str(cc.change_contrast(lowercase , 1_10 ) ).startswith( '<PIL.Image.Image image mode=RGB size=100x100 at' ) def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = imread('digital_image_processing/image_data/lena_small.jpg' , 0 ) # assert ambiguous array for all == True assert canny_img.all() lowerCamelCase_ = canny.canny(lowercase ) # assert canny array for at least one True assert canny_array.any() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' assert gg.gaussian_filter(lowercase , 5 , sigma=0.9 ).all() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) lowerCamelCase_ = conv.img_convolve(lowercase , lowercase ).astype(lowercase ) assert res.any() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' assert med.median_filter(lowercase , 3 ).any() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ , lowerCamelCase_ = sob.sobel_filter(lowercase ) assert grad.any() and theta.any() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = sp.make_sepia(lowercase , 20 ) assert sepia.all() def _SCREAMING_SNAKE_CASE ( lowercase : str = "digital_image_processing/image_data/lena_small.jpg" ): '''simple docstring''' lowerCamelCase_ = bs.Burkes(imread(lowercase , 1 ) , 1_20 ) burkes.process() assert burkes.output_img.any() def _SCREAMING_SNAKE_CASE ( lowercase : str = "digital_image_processing/image_data/lena_small.jpg" , ): '''simple docstring''' lowerCamelCase_ = rs.NearestNeighbour(imread(lowercase , 1 ) , 4_00 , 2_00 ) nn.process() assert nn.output.any() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = 'digital_image_processing/image_data/lena.jpg' # Reading the image and converting it to grayscale. 
lowerCamelCase_ = imread(lowercase , 0 ) # Test for get_neighbors_pixel function() return not None lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = image[x_coordinate][y_coordinate] lowerCamelCase_ = lbp.get_neighbors_pixel( lowercase , lowercase , lowercase , lowercase ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image lowerCamelCase_ = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): lowerCamelCase_ = lbp.local_binary_value(lowercase , lowercase , lowercase ) assert lbp_image.any()
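# A compact, self-contained sketch of the local-binary-pattern loop above on
# synthetic data, so it runs without the lena image fixtures. It assumes the
# same `digital_image_processing` package imported at the top of the module.
import numpy as np
from digital_image_processing.filters import local_binary_pattern as lbp

image = np.arange(25, dtype=np.uint8).reshape(5, 5)
lbp_image = np.zeros(image.shape)
for i in range(image.shape[0]):
    for j in range(image.shape[1]):
        lbp_image[i][j] = lbp.local_binary_value(image, i, j)
print(lbp_image)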
651
1
import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowerCamelCase : str = { "text_branch": "text_model", "audio_branch": "audio_model.audio_encoder", "attn": "attention.self", "self.proj": "output.dense", "attention.self_mask": "attn_mask", "mlp.fc1": "intermediate.dense", "mlp.fc2": "output.dense", "norm1": "layernorm_before", "norm2": "layernorm_after", "bn0": "batch_norm", } lowerCamelCase : List[str] = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc") def _SCREAMING_SNAKE_CASE ( lowercase : Tuple , lowercase : int=False ): '''simple docstring''' lowerCamelCase_ , lowerCamelCase_ = create_model( 'HTSAT-tiny' , 'roberta' , lowercase , precision='fp32' , device='cuda:0' if torch.cuda.is_available() else 'cpu' , enable_fusion=lowercase , fusion_type='aff_2d' if enable_fusion else None , ) return model, model_cfg def _SCREAMING_SNAKE_CASE ( lowercase : Tuple ): '''simple docstring''' lowerCamelCase_ = {} lowerCamelCase_ = r'.*sequential.(\d+).*' lowerCamelCase_ = r'.*_projection.(\d+).*' for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: lowerCamelCase_ = key.replace(lowercase , lowercase ) if re.match(lowercase , lowercase ): # replace sequential layers with list lowerCamelCase_ = re.match(lowercase , lowercase ).group(1 ) lowerCamelCase_ = key.replace(f"""sequential.{sequential_layer}.""" , f"""layers.{int(lowercase )//3}.linear.""" ) elif re.match(lowercase , lowercase ): lowerCamelCase_ = int(re.match(lowercase , lowercase ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... lowerCamelCase_ = 1 if projecton_layer == 0 else 2 lowerCamelCase_ = key.replace(f"""_projection.{projecton_layer}.""" , f"""_projection.linear{transformers_projection_layer}.""" ) if "audio" and "qkv" in key: # split qkv into query key and value lowerCamelCase_ = value lowerCamelCase_ = mixed_qkv.size(0 ) // 3 lowerCamelCase_ = mixed_qkv[:qkv_dim] lowerCamelCase_ = mixed_qkv[qkv_dim : qkv_dim * 2] lowerCamelCase_ = mixed_qkv[qkv_dim * 2 :] lowerCamelCase_ = query_layer lowerCamelCase_ = key_layer lowerCamelCase_ = value_layer else: lowerCamelCase_ = value return model_state_dict def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] , lowercase : Optional[Any] , lowercase : int , lowercase : List[Any]=False ): '''simple docstring''' lowerCamelCase_ , lowerCamelCase_ = init_clap(lowercase , enable_fusion=lowercase ) clap_model.eval() lowerCamelCase_ = clap_model.state_dict() lowerCamelCase_ = rename_state_dict(lowercase ) lowerCamelCase_ = ClapConfig() lowerCamelCase_ = enable_fusion lowerCamelCase_ = ClapModel(lowercase ) # ignore the spectrogram embedding layer model.load_state_dict(lowercase , strict=lowercase ) model.save_pretrained(lowercase ) transformers_config.save_pretrained(lowercase ) if __name__ == "__main__": lowerCamelCase : List[str] = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not") lowerCamelCase : List[str] = parser.parse_args() 
# run the checkpoint conversion defined above (the final `_SCREAMING_SNAKE_CASE` def) _SCREAMING_SNAKE_CASE(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
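# Typical invocation of the conversion script above, followed by loading the
# result; the script filename and all paths are placeholders.
#
#   python convert_clap_original_pytorch_to_hf.py \
#       --checkpoint_path /path/to/clap.ckpt \
#       --pytorch_dump_folder_path ./clap-hf
#
from transformers import ClapModel

model = ClapModel.from_pretrained("./clap-hf")  # the converted checkpoint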
651
class A: '''simple docstring''' def __init__( self : Dict ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = {} def a__ ( self : Union[str, Any] , A_ : List[Any] ) -> int: """simple docstring""" if vertex not in self.adjacency: lowerCamelCase_ = {} self.num_vertices += 1 def a__ ( self : int , A_ : int , A_ : Optional[Any] , A_ : List[str] ) -> Tuple: """simple docstring""" self.add_vertex(A_ ) self.add_vertex(A_ ) if head == tail: return lowerCamelCase_ = weight lowerCamelCase_ = weight def a__ ( self : Optional[int] ) -> List[str]: """simple docstring""" lowerCamelCase_ = self.get_edges() for edge in edges: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge edges.remove((tail, head, weight) ) for i in range(len(A_ ) ): lowerCamelCase_ = list(edges[i] ) edges.sort(key=lambda A_ : e[2] ) for i in range(len(A_ ) - 1 ): if edges[i][2] >= edges[i + 1][2]: lowerCamelCase_ = edges[i][2] + 1 for edge in edges: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge lowerCamelCase_ = weight lowerCamelCase_ = weight def __str__( self : str ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = '' for tail in self.adjacency: for head in self.adjacency[tail]: lowerCamelCase_ = self.adjacency[head][tail] string += f"""{head} -> {tail} == {weight}\n""" return string.rstrip('\n' ) def a__ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = [] for tail in self.adjacency: for head in self.adjacency[tail]: output.append((tail, head, self.adjacency[head][tail]) ) return output def a__ ( self : List[str] ) -> int: """simple docstring""" return self.adjacency.keys() @staticmethod def a__ ( A_ : Optional[Any]=None , A_ : List[str]=None ) -> List[str]: """simple docstring""" lowerCamelCase_ = Graph() if vertices is None: lowerCamelCase_ = [] if edges is None: lowerCamelCase_ = [] for vertex in vertices: g.add_vertex(A_ ) for edge in edges: g.add_edge(*A_ ) return g class A: '''simple docstring''' def __init__( self : Optional[int] ) -> int: """simple docstring""" lowerCamelCase_ = {} lowerCamelCase_ = {} def __len__( self : Any ) -> List[str]: """simple docstring""" return len(self.parent ) def a__ ( self : List[str] , A_ : Any ) -> Dict: """simple docstring""" if item in self.parent: return self.find(A_ ) lowerCamelCase_ = item lowerCamelCase_ = 0 return item def a__ ( self : List[str] , A_ : Tuple ) -> Optional[int]: """simple docstring""" if item not in self.parent: return self.make_set(A_ ) if item != self.parent[item]: lowerCamelCase_ = self.find(self.parent[item] ) return self.parent[item] def a__ ( self : Any , A_ : int , A_ : Tuple ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = self.find(A_ ) lowerCamelCase_ = self.find(A_ ) if roota == roota: return roota if self.rank[roota] > self.rank[roota]: lowerCamelCase_ = roota return roota if self.rank[roota] < self.rank[roota]: lowerCamelCase_ = roota return roota if self.rank[roota] == self.rank[roota]: self.rank[roota] += 1 lowerCamelCase_ = roota return roota return None @staticmethod def a__ ( A_ : int ) -> Tuple: """simple docstring""" lowerCamelCase_ = graph.num_vertices lowerCamelCase_ = Graph.UnionFind() lowerCamelCase_ = [] while num_components > 1: lowerCamelCase_ = {} for vertex in graph.get_vertices(): lowerCamelCase_ = -1 lowerCamelCase_ = graph.get_edges() for edge in edges: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge edges.remove((tail, head, weight) ) for edge in edges: lowerCamelCase_ , lowerCamelCase_ , 
lowerCamelCase_ = edge lowerCamelCase_ = union_find.find(A_ ) lowerCamelCase_ = union_find.find(A_ ) if seta != seta: if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: lowerCamelCase_ = [head, tail, weight] if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: lowerCamelCase_ = [head, tail, weight] for vertex in cheap_edge: if cheap_edge[vertex] != -1: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = cheap_edge[vertex] if union_find.find(A_ ) != union_find.find(A_ ): union_find.union(A_ , A_ ) mst_edges.append(cheap_edge[vertex] ) lowerCamelCase_ = num_components - 1 lowerCamelCase_ = Graph.build(edges=A_ ) return mst
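# A usage sketch for the graph code above. In the un-obfuscated original the
# two classes are `Graph` and its nested `Graph.UnionFind`, and the final
# static method is Borůvka's MST (commonly `Graph.boruvka_mst`); those
# original names are assumed here, since the method bodies above already
# reference `Graph` internally.
g = Graph.build(
    vertices=[1, 2, 3, 4],
    edges=[(1, 2, 1), (2, 3, 2), (3, 4, 1), (4, 1, 3)],
)
print(g)                     # adjacency printed as "head -> tail == weight"
print(Graph.boruvka_mst(g))  # minimum spanning forest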
651
1
from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput lowerCamelCase : str = 8 def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Optional[Any]=BITS ): '''simple docstring''' lowerCamelCase_ = x.device lowerCamelCase_ = (x * 2_55).int().clamp(0 , 2_55 ) lowerCamelCase_ = 2 ** torch.arange(bits - 1 , -1 , -1 , device=lowercase ) lowerCamelCase_ = rearrange(lowercase , 'd -> d 1 1' ) lowerCamelCase_ = rearrange(lowercase , 'b c h w -> b c 1 h w' ) lowerCamelCase_ = ((x & mask) != 0).float() lowerCamelCase_ = rearrange(lowercase , 'b c d h w -> b (c d) h w' ) lowerCamelCase_ = bits * 2 - 1 return bits def _SCREAMING_SNAKE_CASE ( lowercase : List[str] , lowercase : Optional[int]=BITS ): '''simple docstring''' lowerCamelCase_ = x.device lowerCamelCase_ = (x > 0).int() lowerCamelCase_ = 2 ** torch.arange(bits - 1 , -1 , -1 , device=lowercase , dtype=torch.intaa ) lowerCamelCase_ = rearrange(lowercase , 'd -> d 1 1' ) lowerCamelCase_ = rearrange(lowercase , 'b (c d) h w -> b c d h w' , d=8 ) lowerCamelCase_ = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' ) return (dec / 2_55).clamp(0.0 , 1.0 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase : torch.FloatTensor , lowercase : int , lowercase : torch.FloatTensor , lowercase : float = 0.0 , lowercase : bool = True , lowercase : Dict=None , lowercase : bool = True , ): '''simple docstring''' if self.num_inference_steps is None: raise ValueError( 'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) lowerCamelCase_ = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas lowerCamelCase_ = self.alphas_cumprod[timestep] lowerCamelCase_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod lowerCamelCase_ = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf lowerCamelCase_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" lowerCamelCase_ = self.bit_scale if self.config.clip_sample: lowerCamelCase_ = torch.clamp(lowercase , -scale , lowercase ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) lowerCamelCase_ = self._get_variance(lowercase , lowercase ) lowerCamelCase_ = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide lowerCamelCase_ = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf lowerCamelCase_ = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf lowerCamelCase_ = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 lowerCamelCase_ = model_output.device if torch.is_tensor(lowercase ) else 'cpu' lowerCamelCase_ = torch.randn(model_output.shape , dtype=model_output.dtype , generator=lowercase ).to(lowercase ) lowerCamelCase_ = self._get_variance(lowercase , lowercase ) ** 0.5 * eta * noise lowerCamelCase_ = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=lowercase , pred_original_sample=lowercase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase : torch.FloatTensor , lowercase : int , lowercase : torch.FloatTensor , lowercase : Tuple="epsilon" , lowercase : Any=None , lowercase : bool = True , ): '''simple docstring''' lowerCamelCase_ = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: lowerCamelCase_ , lowerCamelCase_ = torch.split(lowercase , sample.shape[1] , dim=1 ) else: lowerCamelCase_ = None # 1. compute alphas, betas lowerCamelCase_ = self.alphas_cumprod[t] lowerCamelCase_ = self.alphas_cumprod[t - 1] if t > 0 else self.one lowerCamelCase_ = 1 - alpha_prod_t lowerCamelCase_ = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": lowerCamelCase_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": lowerCamelCase_ = model_output else: raise ValueError(f"""Unsupported prediction_type {prediction_type}.""" ) # 3. Clip "predicted x_0" lowerCamelCase_ = self.bit_scale if self.config.clip_sample: lowerCamelCase_ = torch.clamp(lowercase , -scale , lowercase ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowerCamelCase_ = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t lowerCamelCase_ = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowerCamelCase_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise lowerCamelCase_ = 0 if t > 0: lowerCamelCase_ = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=lowercase ).to(model_output.device ) lowerCamelCase_ = (self._get_variance(lowercase , predicted_variance=lowercase ) ** 0.5) * noise lowerCamelCase_ = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=lowercase , pred_original_sample=lowercase ) class A( UpperCamelCase ): '''simple docstring''' def __init__( self : Optional[int] , A_ : UNetaDConditionModel , A_ : Union[DDIMScheduler, DDPMScheduler] , A_ : Optional[float] = 1.0 , ) -> Union[str, Any]: """simple docstring""" super().__init__() lowerCamelCase_ = bit_scale lowerCamelCase_ = ( ddim_bit_scheduler_step if isinstance(A_ , A_ ) else ddpm_bit_scheduler_step ) self.register_modules(unet=A_ , scheduler=A_ ) @torch.no_grad() def __call__( self : Optional[int] , A_ : Optional[int] = 256 , A_ : Optional[int] = 256 , A_ : Optional[int] = 50 , A_ : Optional[torch.Generator] = None , A_ : Optional[int] = 1 , A_ : Optional[str] = "pil" , A_ : bool = True , **A_ : Any , ) -> Union[Tuple, ImagePipelineOutput]: """simple docstring""" lowerCamelCase_ = torch.randn( (batch_size, self.unet.config.in_channels, height, width) , generator=A_ , ) lowerCamelCase_ = decimal_to_bits(A_ ) * self.bit_scale lowerCamelCase_ = latents.to(self.device ) self.scheduler.set_timesteps(A_ ) for t in self.progress_bar(self.scheduler.timesteps ): # predict the noise residual lowerCamelCase_ = self.unet(A_ , A_ ).sample # compute the previous noisy sample x_t -> x_t-1 lowerCamelCase_ = self.scheduler.step(A_ , A_ , A_ ).prev_sample lowerCamelCase_ = bits_to_decimal(A_ ) if output_type == "pil": lowerCamelCase_ = self.numpy_to_pil(A_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=A_ )
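# A quick sanity sketch for the bit codec at the top of this file: tensors in
# [0, 1] are quantized to 8-bit, expanded into ±1-valued bit planes, and
# decoded back. The names `decimal_to_bits` / `bits_to_decimal` are the
# originals behind the obfuscated `_SCREAMING_SNAKE_CASE` defs above (the
# pipeline's `__call__` still refers to them by those names).
import torch

x = torch.rand(1, 3, 8, 8)       # fake image batch in [0, 1)
bits = decimal_to_bits(x)        # shape (1, 24, 8, 8), values in {-1, +1}
decoded = bits_to_decimal(bits)  # 8-bit-quantized round trip
assert torch.allclose(decoded, (x * 255).int().float() / 255)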
651
def solution() -> str:
    '''simple docstring'''
    total = 0
    for i in range(1, 1_001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
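# Equivalent but faster sketch: keep only the last ten digits while summing,
# via Python's three-argument pow; 9110846700 is the widely cited answer to
# this Project Euler problem.
def solution_mod() -> str:
    mod = 10**10
    return str(sum(pow(i, i, mod) for i in range(1, 1_001)) % mod).zfill(10)

assert solution_mod() == "9110846700"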
651
1
import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCamelCase : str = logging.get_logger(__name__) lowerCamelCase : Tuple = "▁" lowerCamelCase : int = { "vocab_file": "vocab.json", "spm_file": "sentencepiece.bpe.model", } lowerCamelCase : Dict = { "vocab_file": { "facebook/s2t-small-librispeech-asr": ( "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json" ), }, "spm_file": { "facebook/s2t-small-librispeech-asr": ( "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model" ) }, } lowerCamelCase : str = { "facebook/s2t-small-librispeech-asr": 1_024, } lowerCamelCase : Optional[Any] = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"] lowerCamelCase : Tuple = {"mustc": MUSTC_LANGS} class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = VOCAB_FILES_NAMES UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = MAX_MODEL_INPUT_SIZES UpperCamelCase = ['''input_ids''', '''attention_mask'''] UpperCamelCase = [] def __init__( self : Tuple , A_ : Optional[int] , A_ : List[Any] , A_ : Tuple="<s>" , A_ : Any="</s>" , A_ : str="<pad>" , A_ : Tuple="<unk>" , A_ : List[Any]=False , A_ : Union[str, Any]=False , A_ : Union[str, Any]=None , A_ : Tuple=None , A_ : Optional[Dict[str, Any]] = None , **A_ : int , ) -> None: """simple docstring""" lowerCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A_ , eos_token=A_ , unk_token=A_ , pad_token=A_ , do_upper_case=A_ , do_lower_case=A_ , tgt_lang=A_ , lang_codes=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , ) lowerCamelCase_ = do_upper_case lowerCamelCase_ = do_lower_case lowerCamelCase_ = load_json(A_ ) lowerCamelCase_ = {v: k for k, v in self.encoder.items()} lowerCamelCase_ = spm_file lowerCamelCase_ = load_spm(A_ , self.sp_model_kwargs ) if lang_codes is not None: lowerCamelCase_ = lang_codes lowerCamelCase_ = LANGUAGES[lang_codes] lowerCamelCase_ = [f"""<lang:{lang}>""" for lang in self.langs] lowerCamelCase_ = {lang: self.sp_model.PieceToId(f"""<lang:{lang}>""" ) for lang in self.langs} lowerCamelCase_ = self.lang_tokens lowerCamelCase_ = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: lowerCamelCase_ = {} @property def a__ ( self : List[Any] ) -> int: """simple docstring""" return len(self.encoder ) @property def a__ ( self : Union[str, Any] ) -> str: """simple docstring""" return self._tgt_lang @tgt_lang.setter def a__ ( self : Dict , A_ : List[str] ) -> None: """simple docstring""" lowerCamelCase_ = new_tgt_lang self.set_tgt_lang_special_tokens(A_ ) def a__ ( self : Any , A_ : str ) -> None: """simple docstring""" lowerCamelCase_ = self.lang_code_to_id[tgt_lang] lowerCamelCase_ = [lang_code_id] def a__ ( self : Any , A_ : str ) -> List[str]: """simple docstring""" return self.sp_model.encode(A_ , out_type=A_ ) def a__ ( self : Optional[int] , A_ : Optional[Any] ) -> List[Any]: """simple docstring""" return self.encoder.get(A_ , self.encoder[self.unk_token] ) def a__ ( self : Optional[int] , A_ : int ) -> str: """simple docstring""" return self.decoder.get(A_ , self.unk_token ) def a__ ( self : List[str] , A_ : List[str] ) -> str: """simple docstring""" lowerCamelCase_ = [] lowerCamelCase_ = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece 
model if token in self.all_special_tokens: lowerCamelCase_ = self.sp_model.decode(A_ ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " lowerCamelCase_ = [] else: current_sub_tokens.append(A_ ) lowerCamelCase_ = self.sp_model.decode(A_ ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def a__ ( self : List[str] , A_ : List[str] , A_ : Union[str, Any]=None ) -> List[int]: """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def a__ ( self : Dict , A_ : List[int] , A_ : Optional[List[int]] = None , A_ : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ ) lowerCamelCase_ = [1] * len(self.prefix_tokens ) lowerCamelCase_ = [1] if token_ids_a is None: return prefix_ones + ([0] * len(A_ )) + suffix_ones return prefix_ones + ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones def a__ ( self : int ) -> Dict: """simple docstring""" lowerCamelCase_ = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Tuple ) -> Dict: """simple docstring""" lowerCamelCase_ = self.__dict__.copy() lowerCamelCase_ = None return state def __setstate__( self : Union[str, Any] , A_ : Dict ) -> None: """simple docstring""" lowerCamelCase_ = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): lowerCamelCase_ = {} lowerCamelCase_ = load_spm(self.spm_file , self.sp_model_kwargs ) def a__ ( self : Dict , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" lowerCamelCase_ = Path(A_ ) assert save_dir.is_dir(), f"""{save_directory} should be a directory""" lowerCamelCase_ = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file'] ) lowerCamelCase_ = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file'] ) save_json(self.encoder , A_ ) if os.path.abspath(self.spm_file ) != os.path.abspath(A_ ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , A_ ) elif not os.path.isfile(self.spm_file ): with open(A_ , 'wb' ) as fi: lowerCamelCase_ = self.sp_model.serialized_model_proto() fi.write(A_ ) return (str(A_ ), str(A_ )) def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Dict[str, Any] ): '''simple docstring''' lowerCamelCase_ = sentencepiece.SentencePieceProcessor(**lowercase ) spm.Load(str(lowercase ) ) return spm def _SCREAMING_SNAKE_CASE ( lowercase : str ): '''simple docstring''' with open(lowercase , 'r' ) as f: return json.load(lowercase ) def _SCREAMING_SNAKE_CASE ( lowercase : Dict , lowercase : str ): '''simple docstring''' with open(lowercase , 'w' ) as f: json.dump(lowercase , lowercase , indent=2 )
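# A hedged usage sketch for the tokenizer above, pointing at the pretrained
# checkpoint its vocab maps reference; requires `sentencepiece` installed.
from transformers import Speech2TextTokenizer

tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
ids = tok("hello world").input_ids
print(ids)
print(tok.decode(ids, skip_special_tokens=True))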
651
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) lowerCamelCase : Dict = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Optional[int] = ["ViTFeatureExtractor"] lowerCamelCase : Dict = ["ViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : int = [ "VIT_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTForImageClassification", "ViTForMaskedImageModeling", "ViTModel", "ViTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Tuple = [ "TFViTForImageClassification", "TFViTModel", "TFViTPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Dict = [ "FlaxViTForImageClassification", "FlaxViTModel", "FlaxViTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys lowerCamelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
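# What the `_LazyModule` indirection above provides: importing the package is
# cheap, and a heavy backend is only imported the first time one of its
# attributes is actually resolved.
import transformers.models.vit as vit

config = vit.ViTConfig(hidden_size=192)  # attribute access triggers the real import
print(config.hidden_size)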
651
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCamelCase : List[Any] = { "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"], "configuration_maskformer_swin": ["MaskFormerSwinConfig"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : str = ["MaskFormerFeatureExtractor"] lowerCamelCase : List[str] = ["MaskFormerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Union[str, Any] = [ "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "MaskFormerForInstanceSegmentation", "MaskFormerModel", "MaskFormerPreTrainedModel", ] lowerCamelCase : Any = [ "MaskFormerSwinBackbone", "MaskFormerSwinModel", "MaskFormerSwinPreTrainedModel", ] if TYPE_CHECKING: from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_maskformer import MaskFormerFeatureExtractor from .image_processing_maskformer import MaskFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, ) from .modeling_maskformer_swin import ( MaskFormerSwinBackbone, MaskFormerSwinModel, MaskFormerSwinPreTrainedModel, ) else: import sys lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure)
651
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets lowerCamelCase : int = datasets.logging.get_logger(__name__) lowerCamelCase : Optional[Any] = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n" lowerCamelCase : Tuple = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n" lowerCamelCase : Optional[Any] = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n" def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Optional[int] , lowercase : Any=False , lowercase : Any=False , lowercase : Dict=True , lowercase : List[str]=False , lowercase : int="dummy_doc" ): '''simple docstring''' lowerCamelCase_ = {doc: key_lines} lowerCamelCase_ = {doc: sys_lines} lowerCamelCase_ = {} lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowercase , key_doc_lines[doc] , lowercase ) key_singletons_num += singletons_num if NP_only or min_span: lowerCamelCase_ = reader.set_annotated_parse_trees(lowercase , key_doc_lines[doc] , lowercase , lowercase ) lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowercase , sys_doc_lines[doc] , lowercase ) sys_singletons_num += singletons_num if NP_only or min_span: lowerCamelCase_ = reader.set_annotated_parse_trees(lowercase , key_doc_lines[doc] , lowercase , lowercase ) if remove_nested: lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowercase , lowercase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowercase , lowercase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters lowerCamelCase_ = reader.get_mention_assignments(lowercase , lowercase ) lowerCamelCase_ = reader.get_mention_assignments(lowercase , lowercase ) lowerCamelCase_ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( 'Number of removed nested coreferring mentions in the key ' f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" ) logger.info( 'Number of resulting singleton clusters in the key ' f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" ) if not keep_singletons: logger.info( f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """ 'files, respectively' ) return doc_coref_infos def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Tuple , 
lowercase : List[str] , lowercase : List[Any] , lowercase : List[Any] , lowercase : Tuple , lowercase : str ): '''simple docstring''' lowerCamelCase_ = get_coref_infos(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) lowerCamelCase_ = {} lowerCamelCase_ = 0 lowerCamelCase_ = 0 for name, metric in metrics: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = evaluator.evaluate_documents(lowercase , lowercase , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": fa} ) logger.info( name.ljust(10 ) , f"""Recall: {recall * 1_00:.2f}""" , f""" Precision: {precision * 1_00:.2f}""" , f""" F1: {fa * 1_00:.2f}""" , ) if conll_subparts_num == 3: lowerCamelCase_ = (conll / 3) * 1_00 logger.info(f"""CoNLL score: {conll:.2f}""" ) output_scores.update({'conll_score': conll} ) return output_scores def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] ): '''simple docstring''' lowerCamelCase_ = False for line in key_lines: if not line.startswith('#' ): if len(line.split() ) > 6: lowerCamelCase_ = line.split()[5] if not parse_col == "-": lowerCamelCase_ = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A( datasets.Metric ): '''simple docstring''' def a__ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' ) ), 'references': datasets.Sequence(datasets.Value('string' ) ), } ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[ 'https://github.com/ns-moosavi/coval', 'https://www.aclweb.org/anthology/P16-1060', 'http://www.conll.cemantix.org/2012/data.html', ] , ) def a__ ( self : List[str] , A_ : Optional[Any] , A_ : Optional[int] , A_ : int=True , A_ : str=False , A_ : int=False , A_ : Union[str, Any]=False ) -> List[Any]: """simple docstring""" lowerCamelCase_ = [ ('mentions', evaluator.mentions), ('muc', evaluator.muc), ('bcub', evaluator.b_cubed), ('ceafe', evaluator.ceafe), ('lea', evaluator.lea), ] if min_span: lowerCamelCase_ = util.check_gold_parse_annotation(A_ ) if not has_gold_parse: raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" lowerCamelCase_ = evaluate( key_lines=A_ , sys_lines=A_ , metrics=A_ , NP_only=A_ , remove_nested=A_ , keep_singletons=A_ , min_span=A_ , ) return score
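# Worked example of the averaged CoNLL score computed above: it is the
# unweighted mean of the MUC, B-cubed and CEAFe F1 values, scaled to 100.
# E.g. F1s of 0.60, 0.70 and 0.80 give conll_score = (0.60 + 0.70 + 0.80) / 3 * 100 = 70.0.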
651
1
def selection_sort(collection: list) -> list:
    """Pure-Python selection sort: repeatedly move the smallest remaining
    element to the front of the unsorted suffix."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
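# A quick standalone check of selection_sort above (illustrative values,
# assuming it runs in the same module). Selection sort performs O(n^2)
# comparisons but at most n - 1 swaps.
assert selection_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]
assert selection_sort([]) == []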
651
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=UpperCamelCase ) class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) UpperCamelCase = Features({'''text''': Value('''string''' )} ) UpperCamelCase = Features({} ) UpperCamelCase = "text" @property def a__ ( self : List[Any] ) -> Dict[str, str]: """simple docstring""" return {self.text_column: "text"}
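# Hedged usage sketch for the task template above (upstream exports it as
# `LanguageModeling` in `datasets.tasks`): the frozen dataclass simply maps
# a dataset's text column onto the canonical "text" field.
from datasets.tasks import LanguageModeling

template = LanguageModeling(text_column="content")
print(template.column_mapping)  # {'content': 'text'}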
651
1
from __future__ import annotations


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list) -> Node:
    """Create a linked list from the given sequence and return its head."""
    if not elements_list:
        raise Exception("The Elements List is empty")
    head = current = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Recursively print the list contents from tail to head."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
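# A self-contained iterative variant of print_reverse above, useful when the
# list is longer than Python's default recursion limit (~1000 frames).
def print_reverse_iterative(head_node):
    items = []
    while head_node:
        items.append(head_node.data)
        head_node = head_node.next
    for data in reversed(items):
        print(data)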
651
from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = '''new-model''' if is_tf_available(): class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = NewModelConfig @require_tf class A( unittest.TestCase ): '''simple docstring''' @slow def a__ ( self : str ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = 'bert-base-cased' lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : List[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ = 'bert-base-cased' lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForPreTraining.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : Union[str, Any] ) -> str: """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ ) lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ , output_loading_info=A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : List[Any] ) -> Dict: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : int ) -> str: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = 
AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ ) lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ , output_loading_info=A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : Any ) -> List[Any]: """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ ) lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ , output_loading_info=A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : Tuple ) -> str: """simple docstring""" for model_name in ["bert-base-uncased"]: lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForSequenceClassification.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : List[Any] ) -> Any: """simple docstring""" for model_name in ["bert-base-uncased"]: lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForQuestionAnswering.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow @require_tensorflow_probability def a__ ( self : int ) -> Union[str, Any]: """simple docstring""" for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(A_ ) lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained( A_ , output_loading_info=A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) def a__ ( self : int ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ ) self.assertIsInstance(A_ , A_ ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 ) def a__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ ) self.assertIsInstance(A_ , A_ ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 ) def a__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = copy.deepcopy(model.config ) lowerCamelCase_ = ['FunnelBaseModel'] lowerCamelCase_ = TFAutoModel.from_config(A_ ) self.assertIsInstance(A_ , A_ ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(A_ ) lowerCamelCase_ = TFAutoModel.from_pretrained(A_ ) self.assertIsInstance(A_ , A_ ) def a__ ( self : Any ) -> Tuple: """simple docstring""" try: AutoConfig.register('new-model' , A_ ) lowerCamelCase_ = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(A_ ): auto_class.register(A_ , A_ ) 
auto_class.register(A_ , A_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(A_ ): auto_class.register(A_ , A_ ) # Now that the config is registered, it can be used as any other config with the auto-API lowerCamelCase_ = BertModelTester(self ).get_config() lowerCamelCase_ = NewModelConfig(**tiny_config.to_dict() ) lowerCamelCase_ = auto_class.from_config(A_ ) self.assertIsInstance(A_ , A_ ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(A_ ) lowerCamelCase_ = auto_class.from_pretrained(A_ ) self.assertIsInstance(A_ , A_ ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def a__ ( self : int ) -> int: """simple docstring""" with self.assertRaisesRegex( A_ , 'bert-base is not a local folder and is not a valid model identifier' ): lowerCamelCase_ = TFAutoModel.from_pretrained('bert-base' ) def a__ ( self : Any ) -> Dict: """simple docstring""" with self.assertRaisesRegex( A_ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ): lowerCamelCase_ = TFAutoModel.from_pretrained(A_ , revision='aaaaaa' ) def a__ ( self : str ) -> int: """simple docstring""" with self.assertRaisesRegex( A_ , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ): lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' ) def a__ ( self : Any ) -> List[Any]: """simple docstring""" with self.assertRaisesRegex(A_ , 'Use `from_pt=True` to load this model' ): lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' ) def a__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' ) with RequestCounter() as counter: lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' ) with RequestCounter() as counter: lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
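# The register/cleanup pattern the test above exercises, in isolation
# (mirrors the test's `finally` block; `NewModelConfig` is the class defined
# at the top of this file):
from transformers import CONFIG_MAPPING, AutoConfig

AutoConfig.register("new-model", NewModelConfig)
try:
    config = AutoConfig.for_model("new-model")
    assert isinstance(config, NewModelConfig)
finally:
    del CONFIG_MAPPING._extra_content["new-model"]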
651
1
class A: '''simple docstring''' def __init__( self : Dict ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = {} def a__ ( self : Union[str, Any] , A_ : List[Any] ) -> int: """simple docstring""" if vertex not in self.adjacency: lowerCamelCase_ = {} self.num_vertices += 1 def a__ ( self : int , A_ : int , A_ : Optional[Any] , A_ : List[str] ) -> Tuple: """simple docstring""" self.add_vertex(A_ ) self.add_vertex(A_ ) if head == tail: return lowerCamelCase_ = weight lowerCamelCase_ = weight def a__ ( self : Optional[int] ) -> List[str]: """simple docstring""" lowerCamelCase_ = self.get_edges() for edge in edges: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge edges.remove((tail, head, weight) ) for i in range(len(A_ ) ): lowerCamelCase_ = list(edges[i] ) edges.sort(key=lambda A_ : e[2] ) for i in range(len(A_ ) - 1 ): if edges[i][2] >= edges[i + 1][2]: lowerCamelCase_ = edges[i][2] + 1 for edge in edges: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge lowerCamelCase_ = weight lowerCamelCase_ = weight def __str__( self : str ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = '' for tail in self.adjacency: for head in self.adjacency[tail]: lowerCamelCase_ = self.adjacency[head][tail] string += f"""{head} -> {tail} == {weight}\n""" return string.rstrip('\n' ) def a__ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = [] for tail in self.adjacency: for head in self.adjacency[tail]: output.append((tail, head, self.adjacency[head][tail]) ) return output def a__ ( self : List[str] ) -> int: """simple docstring""" return self.adjacency.keys() @staticmethod def a__ ( A_ : Optional[Any]=None , A_ : List[str]=None ) -> List[str]: """simple docstring""" lowerCamelCase_ = Graph() if vertices is None: lowerCamelCase_ = [] if edges is None: lowerCamelCase_ = [] for vertex in vertices: g.add_vertex(A_ ) for edge in edges: g.add_edge(*A_ ) return g class A: '''simple docstring''' def __init__( self : Optional[int] ) -> int: """simple docstring""" lowerCamelCase_ = {} lowerCamelCase_ = {} def __len__( self : Any ) -> List[str]: """simple docstring""" return len(self.parent ) def a__ ( self : List[str] , A_ : Any ) -> Dict: """simple docstring""" if item in self.parent: return self.find(A_ ) lowerCamelCase_ = item lowerCamelCase_ = 0 return item def a__ ( self : List[str] , A_ : Tuple ) -> Optional[int]: """simple docstring""" if item not in self.parent: return self.make_set(A_ ) if item != self.parent[item]: lowerCamelCase_ = self.find(self.parent[item] ) return self.parent[item] def a__ ( self : Any , A_ : int , A_ : Tuple ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = self.find(A_ ) lowerCamelCase_ = self.find(A_ ) if roota == roota: return roota if self.rank[roota] > self.rank[roota]: lowerCamelCase_ = roota return roota if self.rank[roota] < self.rank[roota]: lowerCamelCase_ = roota return roota if self.rank[roota] == self.rank[roota]: self.rank[roota] += 1 lowerCamelCase_ = roota return roota return None @staticmethod def a__ ( A_ : int ) -> Tuple: """simple docstring""" lowerCamelCase_ = graph.num_vertices lowerCamelCase_ = Graph.UnionFind() lowerCamelCase_ = [] while num_components > 1: lowerCamelCase_ = {} for vertex in graph.get_vertices(): lowerCamelCase_ = -1 lowerCamelCase_ = graph.get_edges() for edge in edges: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge edges.remove((tail, head, weight) ) for edge in edges: lowerCamelCase_ , lowerCamelCase_ , 
lowerCamelCase_ = edge lowerCamelCase_ = union_find.find(A_ ) lowerCamelCase_ = union_find.find(A_ ) if seta != seta: if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: lowerCamelCase_ = [head, tail, weight] if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: lowerCamelCase_ = [head, tail, weight] for vertex in cheap_edge: if cheap_edge[vertex] != -1: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = cheap_edge[vertex] if union_find.find(A_ ) != union_find.find(A_ ): union_find.union(A_ , A_ ) mst_edges.append(cheap_edge[vertex] ) lowerCamelCase_ = num_components - 1 lowerCamelCase_ = Graph.build(edges=A_ ) return mst
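# The two classes above are name-mangled in this dump, so here is a compact,
# self-contained sketch of the same Boruvka idea (assumes a connected graph):
# each round, every component records its cheapest outgoing edge, then all of
# those edges are merged in, at least halving the component count per round
# (O(E log V) overall).
def boruvka_mst(num_vertices: int, edges: list) -> list:
    parent = list(range(num_vertices))

    def find(x: int) -> int:
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    mst = []
    components = num_vertices
    while components > 1:
        cheapest = [None] * num_vertices
        for u, v, w in edges:
            root_u, root_v = find(u), find(v)
            if root_u != root_v:
                for root in (root_u, root_v):
                    if cheapest[root] is None or w < cheapest[root][2]:
                        cheapest[root] = (u, v, w)
        for edge in cheapest:
            if edge is not None and find(edge[0]) != find(edge[1]):
                parent[find(edge[0])] = find(edge[1])
                mst.append(edge)
                components -= 1
    return mst


print(boruvka_mst(4, [(0, 1, 1), (1, 2, 2), (2, 3, 1), (3, 0, 3)]))
# [(0, 1, 1), (2, 3, 1), (1, 2, 2)]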
651
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : str = logging.get_logger(__name__) lowerCamelCase : List[str] = { "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json", } class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = '''gpt_neox_japanese''' def __init__( self : int , A_ : Dict=32000 , A_ : List[Any]=2560 , A_ : Dict=32 , A_ : Union[str, Any]=32 , A_ : List[Any]=4 , A_ : List[str]="gelu" , A_ : Dict=1.00 , A_ : int=10000 , A_ : Dict=2048 , A_ : Dict=0.02 , A_ : Any=1E-5 , A_ : Union[str, Any]=True , A_ : int=31996 , A_ : List[str]=31999 , A_ : List[Any]=0.1 , A_ : List[Any]=0.0 , **A_ : Tuple , ) -> Dict: """simple docstring""" super().__init__(bos_token_id=A_ , eos_token_id=A_ , **A_ ) lowerCamelCase_ = vocab_size lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_multiple_size lowerCamelCase_ = hidden_act lowerCamelCase_ = rotary_pct lowerCamelCase_ = rotary_emb_base lowerCamelCase_ = initializer_range lowerCamelCase_ = layer_norm_eps lowerCamelCase_ = use_cache lowerCamelCase_ = attention_dropout lowerCamelCase_ = hidden_dropout
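# Hedged usage sketch: loading the published checkpoint listed in the map at
# the top of this file resolves to the config class above, with the defaults
# shown in __init__ unless the remote config overrides them.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("abeja/gpt-neox-japanese-2.7b")
print(config.model_type)  # "gpt_neox_japanese"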
651
1
# This is the module that test_patching.py uses to test patch_submodule() import os # noqa: this is just for tests import os as renamed_os # noqa: this is just for tests from os import path # noqa: this is just for tests from os import path as renamed_path # noqa: this is just for tests from os.path import join # noqa: this is just for tests from os.path import join as renamed_join # noqa: this is just for tests lowerCamelCase : Union[str, Any] = open # noqa: we just need to have a builtin inside this module to test it properly
651
import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow lowerCamelCase : List[Any] = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ "text-classification", "language-modeling", "summarization", "token-classification", "question-answering", ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) lowerCamelCase : Tuple = logging.getLogger() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument('-f' ) lowerCamelCase_ = parser.parse_args() return args.f def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Dict="eval" ): '''simple docstring''' lowerCamelCase_ = os.path.join(lowercase , f"""{split}_results.json""" ) if os.path.exists(lowercase ): with open(lowercase , 'r' ) as f: return json.load(lowercase ) raise ValueError(f"""can't find {path}""" ) lowerCamelCase : str = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class A( UpperCamelCase ): '''simple docstring''' def a__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --eval_steps=2 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(A_ , 'argv' , A_ ): run_flax_glue.main() lowerCamelCase_ = get_results(A_ ) self.assertGreaterEqual(result['eval_accuracy'] , 0.75 ) @slow def a__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_clm_flax.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --block_size 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(A_ , 'argv' , A_ ): run_clm_flax.main() lowerCamelCase_ = get_results(A_ ) self.assertLess(result['eval_perplexity'] , 100 ) @slow def a__ ( self : str ) -> Tuple: """simple docstring""" lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_summarization.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --test_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=8 --do_train --do_eval --do_predict --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --predict_with_generate """.split() with patch.object(A_ , 'argv' , A_ ): run_summarization_flax.main() lowerCamelCase_ = get_results(A_ , split='test' ) self.assertGreaterEqual(result['test_rouge1'] , 10 ) self.assertGreaterEqual(result['test_rouge2'] , 2 ) self.assertGreaterEqual(result['test_rougeL'] , 7 ) self.assertGreaterEqual(result['test_rougeLsum'] , 7 ) @slow def a__ ( 
self : Optional[int] ) -> str: """simple docstring""" lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_mlm.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --overwrite_output_dir --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --logging_steps 2 --eval_steps 2 --do_train --do_eval --num_train_epochs=1 """.split() with patch.object(A_ , 'argv' , A_ ): run_mlm_flax.main() lowerCamelCase_ = get_results(A_ ) self.assertLess(result['eval_perplexity'] , 42 ) @slow def a__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_t5_mlm_flax.py --model_name_or_path t5-small --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(A_ , 'argv' , A_ ): run_ta_mlm_flax.main() lowerCamelCase_ = get_results(A_ ) self.assertGreaterEqual(result['eval_accuracy'] , 0.42 ) @slow def a__ ( self : int ) -> Tuple: """simple docstring""" lowerCamelCase_ = 7 if get_gpu_count() > 1 else 2 lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_flax_ner.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --warmup_steps=2 --learning_rate=2e-4 --logging_steps 2 --eval_steps 2 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 """.split() with patch.object(A_ , 'argv' , A_ ): run_flax_ner.main() lowerCamelCase_ = get_results(A_ ) self.assertGreaterEqual(result['eval_accuracy'] , 0.75 ) self.assertGreaterEqual(result['eval_f1'] , 0.3 ) @slow def a__ ( self : str ) -> int: """simple docstring""" lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_qa.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=2 --do_train --do_eval --logging_steps 2 --eval_steps 2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(A_ , 'argv' , A_ ): run_qa.main() lowerCamelCase_ = get_results(A_ ) self.assertGreaterEqual(result['eval_f1'] , 30 ) self.assertGreaterEqual(result['eval_exact'] , 30 )
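# The argv-substitution pattern used by every test above, in isolation:
# compose the CLI string, split it, and patch sys.argv while main() runs so
# the script's argument parser sees it (script name and flags are
# placeholders here).
import sys
from unittest.mock import patch

testargs = "run_glue.py --output_dir /tmp/out --seed 42".split()
with patch.object(sys, "argv", testargs):
    print(sys.argv)  # a real test would call run_flax_glue.main() here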
651
1
from math import ceil, sqrt


def solution(limit: int = 1_000_000) -> int:
    """Project Euler 173: count square laminae that use no more than `limit`
    tiles. A lamina with outer width w and hole width h uses w**2 - h**2
    tiles, with w and h of the same parity and h >= 1."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
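# A naive cross-check of solution() above by direct tile enumeration
# (helper name is illustrative; agrees with solution(100) == 41).
def brute_force(limit: int) -> int:
    count = 0
    for outer in range(3, limit // 4 + 2):
        hole = outer - 2  # widest possible hole -> fewest tiles
        while hole >= 1 and outer * outer - hole * hole <= limit:
            count += 1
            hole -= 2
    return count


assert brute_force(100) == 41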
651
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Minimum number of single-edge coin moves needed so that every node of
    the tree ends up holding exactly one coin."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
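# Worked example for distribute_coins above: a root holding 3 coins with two
# empty children needs exactly 2 moves (one coin travels to each leaf).
example = TreeNode(3, TreeNode(0), TreeNode(0))
print(distribute_coins(example))  # 2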
651
1
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list:
    """Return how many ways each total can be rolled with the given dice."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Project Euler 205: probability that Peter (nine 4-sided dice) rolls a
    strictly higher total than Colin (six 6-sided dice), rounded to seven
    decimal places."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
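# Quick sanity check of total_frequency_distribution above: with two fair
# six-sided dice there are exactly six ways to roll a total of 7, out of 36.
freqs = total_frequency_distribution(sides_number=6, dice_number=2)
assert freqs[7] == 6 and sum(freqs) == 36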
651
from manim import * class A( UpperCamelCase ): '''simple docstring''' def a__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" lowerCamelCase_ = Rectangle(height=0.5 , width=0.5 ) lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) lowerCamelCase_ = Rectangle(height=0.25 , width=0.25 ) lowerCamelCase_ = [mem.copy() for i in range(6 )] lowerCamelCase_ = [mem.copy() for i in range(6 )] lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = Text('CPU' , font_size=24 ) lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(A_ ) lowerCamelCase_ = [mem.copy() for i in range(4 )] lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = Text('GPU' , font_size=24 ) lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ ) gpu.move_to([-1, -1, 0] ) self.add(A_ ) lowerCamelCase_ = [mem.copy() for i in range(6 )] lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = Text('Model' , font_size=24 ) lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ ) model.move_to([3, -1.0, 0] ) self.add(A_ ) lowerCamelCase_ = [] lowerCamelCase_ = [] for i, rect in enumerate(A_ ): lowerCamelCase_ = fill.copy().set_fill(A_ , opacity=0.8 ) target.move_to(A_ ) model_arr.append(A_ ) lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(A_ ) self.add(*A_ , *A_ ) lowerCamelCase_ = [meta_mem.copy() for i in range(6 )] lowerCamelCase_ = [meta_mem.copy() for i in range(6 )] lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = Text('Disk' , font_size=24 ) lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ ) disk.move_to([-4, -1.25, 0] ) self.add(A_ , A_ ) lowerCamelCase_ = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowerCamelCase_ = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(A_ , A_ ) lowerCamelCase_ = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(A_ ) lowerCamelCase_ = MarkupText( f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(A_ ) ) lowerCamelCase_ = Square(0.3 ) input.set_fill(A_ , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , A_ , buff=0.5 ) self.play(Write(A_ ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=A_ , buff=0.02 ) self.play(MoveToTarget(A_ ) ) self.play(FadeOut(A_ ) ) lowerCamelCase_ = Arrow(start=A_ , end=A_ , color=A_ , buff=0.5 ) a.next_to(model_arr[0].get_left() , A_ , buff=0.2 ) model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) lowerCamelCase_ = MarkupText( f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(A_ , run_time=3 ) ) lowerCamelCase_ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02} self.play( Write(A_ 
) , Circumscribe(model_arr[0] , color=A_ , **A_ ) , Circumscribe(model_cpu_arr[0] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) lowerCamelCase_ = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , A_ , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) lowerCamelCase_ = AnimationGroup( FadeOut(A_ , run_time=0.5 ) , MoveToTarget(A_ , run_time=0.5 ) , FadeIn(A_ , run_time=0.5 ) , lag_ratio=0.2 ) self.play(A_ ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: lowerCamelCase_ = 0.7 self.play( Circumscribe(model_arr[i] , **A_ ) , Circumscribe(cpu_left_col_base[i] , **A_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , Circumscribe(model_arr[i + 1] , color=A_ , **A_ ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=A_ , **A_ ) , Circumscribe(cpu_left_col_base[-1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) lowerCamelCase_ = a_c lowerCamelCase_ = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(A_ ) , FadeOut(A_ , run_time=0.5 ) , ) lowerCamelCase_ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(A_ , run_time=3 ) , MoveToTarget(A_ ) ) self.wait()
651
1
lowerCamelCase : Union[str, Any] = range(2, 20 + 1) lowerCamelCase : Optional[Any] = [10**k for k in range(ks[-1] + 1)] lowerCamelCase : dict[int, dict[int, list[list[int]]]] = {} def _SCREAMING_SNAKE_CASE ( lowercase : List[str] , lowercase : Optional[Any] , lowercase : Any , lowercase : Tuple ): '''simple docstring''' lowerCamelCase_ = sum(a_i[j] for j in range(lowercase , len(lowercase ) ) ) lowerCamelCase_ = sum(a_i[j] * base[j] for j in range(min(len(lowercase ) , lowercase ) ) ) lowerCamelCase_ , lowerCamelCase_ = 0, 0 lowerCamelCase_ = n - i lowerCamelCase_ = memo.get(lowercase ) if sub_memo is not None: lowerCamelCase_ = sub_memo.get(lowercase ) if jumps is not None and len(lowercase ) > 0: # find and make the largest jump without going over lowerCamelCase_ = -1 for _k in range(len(lowercase ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: lowerCamelCase_ = _k break if max_jump >= 0: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = jumps[max_jump] # since the difference between jumps is cached, add c lowerCamelCase_ = diff + c for j in range(min(lowercase , len(lowercase ) ) ): lowerCamelCase_ , lowerCamelCase_ = divmod(lowercase , 10 ) if new_c > 0: add(lowercase , lowercase , lowercase ) else: lowerCamelCase_ = [] else: lowerCamelCase_ = {c: []} lowerCamelCase_ = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps lowerCamelCase_ , lowerCamelCase_ = next_term(lowercase , k - 1 , i + dn , lowercase ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead lowerCamelCase_ , lowerCamelCase_ = compute(lowercase , lowercase , i + dn , lowercase ) diff += _diff dn += terms_jumped lowerCamelCase_ = sub_memo[c] # keep jumps sorted by # of terms skipped lowerCamelCase_ = 0 while j < len(lowercase ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(lowercase , (diff, dn, k) ) return (diff, dn) def _SCREAMING_SNAKE_CASE ( lowercase : List[Any] , lowercase : List[Any] , lowercase : Optional[Any] , lowercase : Optional[int] ): '''simple docstring''' if i >= n: return 0, i if k > len(lowercase ): a_i.extend([0 for _ in range(k - len(lowercase ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) lowerCamelCase_ = i lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 0, 0, 0 for j in range(len(lowercase ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 lowerCamelCase_ = ds_c + ds_b diff += addend lowerCamelCase_ = 0 for j in range(lowercase ): lowerCamelCase_ = a_i[j] + addend lowerCamelCase_ , lowerCamelCase_ = divmod(lowercase , 10 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(lowercase , lowercase , lowercase ) return diff, i - start_i def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : List[str] ): '''simple docstring''' for j in range(lowercase , len(lowercase ) ): lowerCamelCase_ = digits[j] + addend if s >= 10: lowerCamelCase_ , lowerCamelCase_ = divmod(lowercase , 10 ) lowerCamelCase_ = addend // 10 + quotient else: lowerCamelCase_ = s lowerCamelCase_ = addend // 10 if addend == 0: break while addend > 0: lowerCamelCase_ , lowerCamelCase_ = divmod(lowercase , 10 ) digits.append(lowercase ) def _SCREAMING_SNAKE_CASE ( lowercase : int = 10**15 ): '''simple docstring''' lowerCamelCase_ = [1] lowerCamelCase_ = 1 lowerCamelCase_ = 0 while True: 
lowerCamelCase_ , lowerCamelCase_ = next_term(lowercase , 20 , i + dn , lowercase ) dn += terms_jumped if dn == n - i: break lowerCamelCase_ = 0 for j in range(len(lowercase ) ): a_n += digits[j] * 10**j return a_n if __name__ == "__main__": print(F"""{solution() = }""")
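# A direct reference for small n: the sequence above starts at a(1) = 1 with
# a(n) = a(n - 1) + digitsum(a(n - 1)), i.e. 1, 2, 4, 8, 16, 23, 28, ...
# (helper name is illustrative; the memoised jump table exists to make
# n = 10**15 tractable, which this naive loop is not).
def naive_a(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(digit) for digit in str(a))
    return a


print(naive_a(7))  # 28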
651
import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available from . import BaseDiffusersCLICommand def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] ): '''simple docstring''' return EnvironmentCommand() class A( UpperCamelCase ): '''simple docstring''' @staticmethod def a__ ( A_ : ArgumentParser ) -> str: """simple docstring""" lowerCamelCase_ = parser.add_parser('env' ) download_parser.set_defaults(func=A_ ) def a__ ( self : Optional[Any] ) -> Any: """simple docstring""" lowerCamelCase_ = huggingface_hub.__version__ lowerCamelCase_ = 'not installed' lowerCamelCase_ = 'NA' if is_torch_available(): import torch lowerCamelCase_ = torch.__version__ lowerCamelCase_ = torch.cuda.is_available() lowerCamelCase_ = 'not installed' if is_transformers_available(): import transformers lowerCamelCase_ = transformers.__version__ lowerCamelCase_ = 'not installed' if is_accelerate_available(): import accelerate lowerCamelCase_ = accelerate.__version__ lowerCamelCase_ = 'not installed' if is_xformers_available(): import xformers lowerCamelCase_ = xformers.__version__ lowerCamelCase_ = { '`diffusers` version': version, 'Platform': platform.platform(), 'Python version': platform.python_version(), 'PyTorch version (GPU?)': f"""{pt_version} ({pt_cuda_available})""", 'Huggingface_hub version': hub_version, 'Transformers version': transformers_version, 'Accelerate version': accelerate_version, 'xFormers version': xformers_version, 'Using GPU in script?': '<fill in>', 'Using distributed or parallel set-up in script?': '<fill in>', } print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' ) print(self.format_dict(A_ ) ) return info @staticmethod def a__ ( A_ : Dict ) -> Any: """simple docstring""" return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
651
1
import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def _SCREAMING_SNAKE_CASE ( lowercase : Tuple ): '''simple docstring''' return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() ) def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int] , lowercase : Any ): '''simple docstring''' lowerCamelCase_ = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue lowerCamelCase_ = key.replace('heads.cmd.mim_head.cls.predictions' , 'mmm_image_head' ) lowerCamelCase_ = key.replace('heads.cmd.mlm_head.cls.predictions' , 'mmm_text_head' ) lowerCamelCase_ = key.replace('heads.cmd.itm_head.cls' , 'itm_head' ) lowerCamelCase_ = key.replace('heads.cmd.itm_head.pooler' , 'itm_head.pooler' ) lowerCamelCase_ = key.replace('heads.cmd.clip_head.logit_scale' , 'flava.logit_scale' ) lowerCamelCase_ = key.replace('heads.fairseq_mlm.cls.predictions' , 'mlm_head' ) lowerCamelCase_ = key.replace('heads.imagenet.mim_head.cls.predictions' , 'mim_head' ) lowerCamelCase_ = key.replace('mm_text_projection' , 'flava.text_to_mm_projection' ) lowerCamelCase_ = key.replace('mm_image_projection' , 'flava.image_to_mm_projection' ) lowerCamelCase_ = key.replace('image_encoder.module' , 'flava.image_model' ) lowerCamelCase_ = key.replace('text_encoder.module' , 'flava.text_model' ) lowerCamelCase_ = key.replace('mm_encoder.module.encoder.cls_token' , 'flava.multimodal_model.cls_token' ) lowerCamelCase_ = key.replace('mm_encoder.module' , 'flava.multimodal_model' ) lowerCamelCase_ = key.replace('text_projection' , 'flava.text_projection' ) lowerCamelCase_ = key.replace('image_projection' , 'flava.image_projection' ) lowerCamelCase_ = value.float() for key, value in codebook_state_dict.items(): lowerCamelCase_ = value return upgrade @torch.no_grad() def _SCREAMING_SNAKE_CASE ( lowercase : Dict , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : Optional[int]=None ): '''simple docstring''' if config_path is not None: lowerCamelCase_ = FlavaConfig.from_pretrained(lowercase ) else: lowerCamelCase_ = FlavaConfig() lowerCamelCase_ = FlavaForPreTraining(lowercase ).eval() lowerCamelCase_ = convert_dalle_checkpoint(lowercase , lowercase , save_checkpoint=lowercase ) if os.path.exists(lowercase ): lowerCamelCase_ = torch.load(lowercase , map_location='cpu' ) else: lowerCamelCase_ = torch.hub.load_state_dict_from_url(lowercase , map_location='cpu' ) lowerCamelCase_ = upgrade_state_dict(lowercase , lowercase ) hf_model.load_state_dict(lowercase ) lowerCamelCase_ = hf_model.state_dict() lowerCamelCase_ = count_parameters(lowercase ) lowerCamelCase_ = count_parameters(lowercase ) + count_parameters(lowercase ) assert torch.allclose(lowercase , lowercase , atol=1e-3 ) hf_model.save_pretrained(lowercase ) if __name__ == "__main__": lowerCamelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") lowerCamelCase : str = parser.parse_args() 
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
651
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    """True for two-digit fractions where naively 'cancelling' the shared
    digit (e.g. 49/98 -> 4/8) happens to preserve the value."""
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(digit_len: int = 2) -> int:
    """Project Euler 33: denominator of the product of all non-trivial
    digit-cancelling fractions, in lowest terms."""
    result = 1.0
    for fraction in fraction_list(digit_len):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
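# The four non-trivial fractions found above are 16/64, 19/95, 26/65 and
# 49/98; their product is 1/100, hence solution() returns 100.
assert is_digit_cancelling(49, 98)
assert solution() == 100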
651
1
import numpy as np
import qiskit


def bbaa(key_len: int = 8, seed: int | None = None) -> str:
    """Simulate the BB84 quantum key distribution protocol and return the
    agreed key as a bit string of length `key_len`."""
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)

    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result
            )
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bbaa(8, seed=0)}")
    from doctest import testmod

    testmod()
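# Determinism check for the simulation above: fixing `seed` pins both the
# basis/state draws and the Aer simulator, so repeated runs must agree
# (requires qiskit with the Aer provider installed).
assert bbaa(8, seed=42) == bbaa(8, seed=42)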
700
from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging lowerCamelCase : List[Any] = logging.get_logger(__name__) class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = ['''pixel_values'''] def __init__( self : List[Any] , A_ : bool = True , A_ : Dict[str, int] = None , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : bool = True , A_ : Dict[str, int] = None , A_ : bool = True , A_ : Union[int, float] = 1 / 255 , A_ : bool = True , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **A_ : Tuple , ) -> None: """simple docstring""" super().__init__(**A_ ) lowerCamelCase_ = size if size is not None else {'shortest_edge': 224} lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ ) lowerCamelCase_ = crop_size if crop_size is not None else {'height': 224, 'width': 224} lowerCamelCase_ = get_size_dict(A_ , param_name='crop_size' ) lowerCamelCase_ = do_resize lowerCamelCase_ = size lowerCamelCase_ = resample lowerCamelCase_ = do_center_crop lowerCamelCase_ = crop_size lowerCamelCase_ = do_rescale lowerCamelCase_ = rescale_factor lowerCamelCase_ = do_normalize lowerCamelCase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN lowerCamelCase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Dict[str, int] , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Tuple , ) -> np.ndarray: """simple docstring""" lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: lowerCamelCase_ = int((256 / 224) * size['shortest_edge'] ) lowerCamelCase_ = get_resize_output_image_size(A_ , size=A_ , default_to_square=A_ ) lowerCamelCase_ = {'height': output_size[0], 'width': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" ) return resize( A_ , size=(size_dict['height'], size_dict['width']) , resample=A_ , data_format=A_ , **A_ ) def a__ ( self : Any , A_ : np.ndarray , A_ : Dict[str, int] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Any , ) -> np.ndarray: """simple docstring""" lowerCamelCase_ = get_size_dict(A_ ) if "height" not in size or "width" not in size: raise ValueError(f"""Size dict must have keys 'height' and 'width'. 
Got {size.keys()}""" ) return center_crop(A_ , size=(size['height'], size['width']) , data_format=A_ , **A_ ) def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Union[int, float] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Optional[int] , ) -> np.ndarray: """simple docstring""" return rescale(A_ , scale=A_ , data_format=A_ , **A_ ) def a__ ( self : List[str] , A_ : np.ndarray , A_ : Union[float, List[float]] , A_ : Union[float, List[float]] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : str , ) -> np.ndarray: """simple docstring""" return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ ) def a__ ( self : Optional[int] , A_ : ImageInput , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : PILImageResampling = None , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : Optional[bool] = None , A_ : Optional[float] = None , A_ : Optional[bool] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[TensorType] = None , A_ : ChannelDimension = ChannelDimension.FIRST , **A_ : List[Any] , ) -> BatchFeature: """simple docstring""" lowerCamelCase_ = do_resize if do_resize is not None else self.do_resize lowerCamelCase_ = resample if resample is not None else self.resample lowerCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase_ = image_mean if image_mean is not None else self.image_mean lowerCamelCase_ = image_std if image_std is not None else self.image_std lowerCamelCase_ = size if size is not None else self.size lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ ) lowerCamelCase_ = crop_size if crop_size is not None else self.crop_size lowerCamelCase_ = get_size_dict(A_ , param_name='crop_size' ) lowerCamelCase_ = make_list_of_images(A_ ) if not valid_images(A_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. lowerCamelCase_ = [to_numpy_array(A_ ) for image in images] if do_resize: lowerCamelCase_ = [self.resize(A_ , A_ , A_ ) for image in images] if do_center_crop: lowerCamelCase_ = [self.center_crop(A_ , A_ ) for image in images] if do_rescale: lowerCamelCase_ = [self.rescale(A_ , A_ ) for image in images] if do_normalize: lowerCamelCase_ = [self.normalize(A_ , A_ , A_ ) for image in images] lowerCamelCase_ = [to_channel_dimension_format(A_ , A_ ) for image in images] lowerCamelCase_ = {'pixel_values': images} return BatchFeature(data=A_ , tensor_type=A_ )
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place, using binary search to locate each insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
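# --- Illustrative usage (not part of the original file) ---
# The binary search finds each insertion point in O(log i) comparisons,
# though the element shifting keeps the sort O(n^2) overall.
assert binary_insertion_sort([5, 2, 9, 1]) == [1, 2, 5, 9]
assert binary_insertion_sort([]) == []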
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int) -> None:
        """k is the Harris free parameter; only the usual 0.04 / 0.06 values are accepted."""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        """Return the image with detected corners marked in red, plus the corner list."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # the original hard-coded 0.04 here, ignoring the constructor argument
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
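# --- Illustrative sketch (not part of the original file) ---
# One Harris response value R = det(M) - k * trace(M)^2 for a single
# made-up structure tensor M (wxx, wyy, wxy are windowed gradient sums):
wxx, wyy, wxy = 4.0, 3.0, 1.0
k = 0.04
det = wxx * wyy - wxy**2   # det(M) = 11.0
trace = wxx + wyy          # trace(M) = 7.0
r = det - k * trace**2
print(r)  # 9.04 -> above the 0.5 threshold, so this pixel would be kept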
import inspect import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class A: '''simple docstring''' def __init__( self : Optional[Any] , A_ : Optional[int] , A_ : int=2 , A_ : str=32 , A_ : Any=16 , A_ : Optional[int]=3 , A_ : Optional[int]=True , A_ : Optional[Any]=True , A_ : Dict=32 , A_ : Dict=4 , A_ : Any=[0, 1, 2, 3] , A_ : Optional[int]=4 , A_ : Tuple=37 , A_ : List[str]="gelu" , A_ : List[str]=0.1 , A_ : Dict=0.1 , A_ : Union[str, Any]=0.02 , A_ : Union[str, Any]=3 , A_ : Any=[1, 384, 24, 24] , A_ : List[Any]=True , A_ : str=None , ) -> Any: """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = image_size lowerCamelCase_ = patch_size lowerCamelCase_ = num_channels lowerCamelCase_ = is_training lowerCamelCase_ = use_labels lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = backbone_out_indices lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = initializer_range lowerCamelCase_ = num_labels lowerCamelCase_ = backbone_featmap_shape lowerCamelCase_ = scope lowerCamelCase_ = is_hybrid # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) lowerCamelCase_ = (image_size // patch_size) ** 2 lowerCamelCase_ = num_patches + 1 def a__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowerCamelCase_ = self.get_config() return config, pixel_values, labels def a__ ( self : List[Any] ) -> List[str]: """simple docstring""" lowerCamelCase_ = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, 'hidden_sizes': [96, 192, 384, 768], 'num_groups': 2, } return DPTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=UpperCAmelCase_ , backbone_featmap_shape=self.backbone_featmap_shape , ) def a__ ( self : str , A_ : int , A_ : Dict , A_ : str ) -> 
Union[str, Any]: """simple docstring""" lowerCamelCase_ = DPTModel(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() lowerCamelCase_ = model(UpperCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self : str , A_ : Dict , A_ : int , A_ : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = self.num_labels lowerCamelCase_ = DPTForDepthEstimation(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() lowerCamelCase_ = model(UpperCAmelCase_ ) self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) ) def a__ ( self : int , A_ : str , A_ : Dict , A_ : int ) -> List[Any]: """simple docstring""" lowerCamelCase_ = self.num_labels lowerCamelCase_ = DPTForSemanticSegmentation(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() lowerCamelCase_ = model(UpperCAmelCase_ , labels=UpperCAmelCase_ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def a__ ( self : Any ) -> List[Any]: """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs lowerCamelCase_ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class A( snake_case__ , snake_case__ , unittest.TestCase ): '''simple docstring''' UpperCamelCase = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () UpperCamelCase = ( { """depth-estimation""": DPTForDepthEstimation, """feature-extraction""": DPTModel, """image-segmentation""": DPTForSemanticSegmentation, } if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def a__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" lowerCamelCase_ = DPTModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37 ) def a__ ( self : List[Any] ) -> Any: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='DPT does not use inputs_embeds' ) def a__ ( self : Union[str, Any] ) -> str: """simple docstring""" pass def a__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ = model_class(UpperCAmelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear ) ) def a__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ = model_class(UpperCAmelCase_ ) lowerCamelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ = [*signature.parameters.keys()] lowerCamelCase_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCAmelCase_ ) def a__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_ ) def a__ ( self : Tuple ) -> str: """simple docstring""" lowerCamelCase_ = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*UpperCAmelCase_ ) def a__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase_ ) def a__ ( self : Optional[Any] ) -> Tuple: """simple docstring""" for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = True if model_class in get_values(UpperCAmelCase_ ): continue lowerCamelCase_ = model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.train() lowerCamelCase_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_ ) lowerCamelCase_ = model(**UpperCAmelCase_ ).loss loss.backward() def a__ ( self : Tuple ) -> List[str]: """simple docstring""" for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = False lowerCamelCase_ = True if model_class in get_values(UpperCAmelCase_ ) or not model_class.supports_gradient_checkpointing: continue lowerCamelCase_ = model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.gradient_checkpointing_enable() model.train() lowerCamelCase_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_ ) lowerCamelCase_ = model(**UpperCAmelCase_ ).loss loss.backward() def a__ ( self : Optional[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = _config_zero_init(UpperCAmelCase_ ) for model_class in self.all_model_classes: lowerCamelCase_ = model_class(config=UpperCAmelCase_ ) # Skip the check for the backbone lowerCamelCase_ = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": lowerCamelCase_ = [f"""{name}.{key}""" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def a__ ( self : Tuple ) -> List[str]: """simple docstring""" pass @slow def a__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]: lowerCamelCase_ = DPTModel.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) def a__ ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = 'add' with self.assertRaises(UpperCAmelCase_ ): lowerCamelCase_ = DPTForDepthEstimation(UpperCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision @slow class A( unittest.TestCase ): '''simple docstring''' def a__ ( self : Dict ) -> str: """simple docstring""" lowerCamelCase_ = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas' ) lowerCamelCase_ = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas' ).to(UpperCAmelCase_ ) lowerCamelCase_ = prepare_img() lowerCamelCase_ = image_processor(images=UpperCAmelCase_ , return_tensors='pt' ).to(UpperCAmelCase_ ) # forward pass with torch.no_grad(): lowerCamelCase_ = model(**UpperCAmelCase_ ) lowerCamelCase_ = outputs.predicted_depth # verify the predicted depth lowerCamelCase_ = torch.Size((1, 384, 384) ) self.assertEqual(predicted_depth.shape , UpperCAmelCase_ ) lowerCamelCase_ = torch.tensor( [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(UpperCAmelCase_ ) self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , UpperCAmelCase_ , atol=1E-4 ) )
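# --- Illustrative sketch (not part of the original file) ---
# The DPT tester above derives its expected sequence length from the
# patch grid plus one [CLS] token; with its defaults (image 32, patch 16):
image_size, patch_size = 32, 16
num_patches = (image_size // patch_size) ** 2
seq_length = num_patches + 1  # +1 for the [CLS] token
print(num_patches, seq_length)  # 4 5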
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCamelCase : str = logging.get_logger(__name__) lowerCamelCase : Optional[Any] = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } lowerCamelCase : int = { "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } lowerCamelCase : Tuple = {"facebook/blenderbot-3B": 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = ( list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) lowerCamelCase_ = bs[:] lowerCamelCase_ = 0 for b in range(2**8 ): if b not in bs: bs.append(lowercase ) cs.append(2**8 + n ) n += 1 lowerCamelCase_ = [chr(lowercase ) for n in cs] return dict(zip(lowercase , lowercase ) ) def _SCREAMING_SNAKE_CASE ( lowercase : int ): '''simple docstring''' lowerCamelCase_ = set() lowerCamelCase_ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase_ = char return pairs class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = VOCAB_FILES_NAMES UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : Optional[Any] , A_ : List[Any] , A_ : List[Any] , A_ : Union[str, Any]="replace" , A_ : Dict="<s>" , A_ : Optional[int]="</s>" , A_ : Optional[Any]="</s>" , A_ : Dict="<s>" , A_ : Dict="<unk>" , A_ : Any="<pad>" , A_ : Dict="<mask>" , A_ : Union[str, Any]=False , **A_ : List[str] , ) -> Tuple: """simple docstring""" lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token super().__init__( errors=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , **A_ , ) with open(A_ , encoding='utf-8' ) as vocab_handle: lowerCamelCase_ = json.load(A_ ) lowerCamelCase_ = {v: k for k, v in self.encoder.items()} lowerCamelCase_ = errors # how to handle errors in decoding lowerCamelCase_ = bytes_to_unicode() lowerCamelCase_ = {v: k for k, v in self.byte_encoder.items()} with open(A_ , encoding='utf-8' ) as merges_handle: lowerCamelCase_ = merges_handle.read().split('\n' )[1:-1] lowerCamelCase_ = [tuple(merge.split() ) for merge in bpe_merges] lowerCamelCase_ = dict(zip(A_ , range(len(A_ ) ) ) ) lowerCamelCase_ = {} lowerCamelCase_ = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCamelCase_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def a__ ( self : Optional[Any] ) -> Dict: """simple docstring""" return len(self.encoder ) def a__ ( self : List[Any] ) -> Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def a__ ( self : Tuple , A_ : Tuple ) -> Optional[Any]: """simple docstring""" if token in self.cache: return self.cache[token] lowerCamelCase_ = tuple(A_ ) lowerCamelCase_ = get_pairs(A_ ) if not pairs: return token while True: lowerCamelCase_ = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase_ , lowerCamelCase_ = bigram lowerCamelCase_ = [] lowerCamelCase_ = 0 while i < len(A_ ): try: lowerCamelCase_ = word.index(A_ , A_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCamelCase_ = j if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCamelCase_ = tuple(A_ ) lowerCamelCase_ = new_word if len(A_ ) == 1: break else: lowerCamelCase_ = get_pairs(A_ ) lowerCamelCase_ = ' '.join(A_ ) lowerCamelCase_ = word return word def a__ ( self : str , A_ : List[str] ) -> List[str]: """simple docstring""" lowerCamelCase_ = [] for token in re.findall(self.pat , A_ ): lowerCamelCase_ = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A_ ).split(' ' ) ) return bpe_tokens def a__ ( self : Tuple , A_ : str ) -> Optional[Any]: """simple docstring""" return self.encoder.get(A_ , self.encoder.get(self.unk_token ) ) def a__ ( self : Tuple , A_ : Dict ) -> List[Any]: """simple docstring""" return self.decoder.get(A_ ) def a__ ( self : Optional[int] , A_ : List[Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = ''.join(A_ ) lowerCamelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def a__ ( self : Tuple , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(A_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCamelCase_ = os.path.join( A_ , 
(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) lowerCamelCase_ = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(A_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' ) lowerCamelCase_ = 0 with open(A_ , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A_ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ' Please check that the tokenizer is not corrupted!' ) lowerCamelCase_ = token_index writer.write(' '.join(A_ ) + '\n' ) index += 1 return vocab_file, merge_file def a__ ( self : str , A_ : List[int] , A_ : Optional[List[int]] = None , A_ : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ ) if token_ids_a is None: return [1] + ([0] * len(A_ )) + [1] return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1] def a__ ( self : int , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowerCamelCase_ = [self.sep_token_id] lowerCamelCase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a__ ( self : str , A_ : Optional[Any] , A_ : Union[str, Any]=False , **A_ : List[str] ) -> List[Any]: """simple docstring""" lowerCamelCase_ = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(A_ ) > 0 and not text[0].isspace()): lowerCamelCase_ = ' ' + text return (text, kwargs) def a__ ( self : List[Any] , A_ : List[int] , A_ : Optional[List[int]] = None ) -> Dict: """simple docstring""" return token_ids_a + [self.eos_token_id] def a__ ( self : Optional[int] , A_ : "Conversation" ) -> List[int]: """simple docstring""" lowerCamelCase_ = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(A_ ) lowerCamelCase_ = ' '.join(A_ ) lowerCamelCase_ = self.encode(A_ ) if len(A_ ) > self.model_max_length: lowerCamelCase_ = input_ids[-self.model_max_length :] logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
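# --- Illustrative sketch (not part of the original file) ---
# What the get_pairs helper above computes for a toy word; during BPE,
# the lowest-ranked pair is merged and the pairs are recomputed:
word = tuple("hello")
pairs = set()
prev_char = word[0]
for char in word[1:]:
    pairs.add((prev_char, char))
    prev_char = char
print(sorted(pairs))  # [('e', 'l'), ('h', 'e'), ('l', 'l'), ('l', 'o')]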
def binary_recursive(decimal: int) -> str:
    """Recursively convert a non-negative integer to its binary string."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input, then delegate to binary_recursive."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
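# --- Illustrative usage (not part of the original file) ---
print(binary_recursive(10))  # '1010'
print(main(" -25 "))         # '-0b11001'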
lowerCamelCase : Dict = "Alexander Joslin" import operator as op from .stack import Stack def _SCREAMING_SNAKE_CASE ( lowercase : str ): '''simple docstring''' lowerCamelCase_ = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub} lowerCamelCase_ = Stack() lowerCamelCase_ = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(lowercase ) ) elif i in operators: # RULE 2 operator_stack.push(lowercase ) elif i == ")": # RULE 4 lowerCamelCase_ = operator_stack.peek() operator_stack.pop() lowerCamelCase_ = operand_stack.peek() operand_stack.pop() lowerCamelCase_ = operand_stack.peek() operand_stack.pop() lowerCamelCase_ = operators[opr](lowercase , lowercase ) operand_stack.push(lowercase ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": lowerCamelCase : Any = "(5 + ((4 * 2) * (2 + 3)))" # answer = 45 print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class A( UpperCamelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase = DiTPipeline UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS UpperCamelCase = PipelineTesterMixin.required_optional_params - { '''latents''', '''num_images_per_prompt''', '''callback''', '''callback_steps''', } UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS UpperCamelCase = False def a__ ( self : List[Any] ) -> Tuple: """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=UpperCamelCase_ , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=UpperCamelCase_ , ) lowerCamelCase_ = AutoencoderKL() lowerCamelCase_ = DDIMScheduler() lowerCamelCase_ = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler} return components def a__ ( self : Dict , A_ : Optional[Any] , A_ : List[str]=0 ) -> List[str]: """simple docstring""" if str(UpperCamelCase_ ).startswith('mps' ): lowerCamelCase_ = torch.manual_seed(UpperCamelCase_ ) else: lowerCamelCase_ = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) lowerCamelCase_ = { 'class_labels': [1], 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def a__ ( self : List[str] ) -> Dict: """simple docstring""" lowerCamelCase_ = 'cpu' lowerCamelCase_ = self.get_dummy_components() lowerCamelCase_ = self.pipeline_class(**UpperCamelCase_ ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) lowerCamelCase_ = self.get_dummy_inputs(UpperCamelCase_ ) lowerCamelCase_ = pipe(**UpperCamelCase_ ).images lowerCamelCase_ = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) lowerCamelCase_ = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) lowerCamelCase_ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCamelCase_ , 1E-3 ) def a__ ( self : Dict ) -> str: """simple docstring""" self._test_inference_batch_single_identical(relax_max_difference=UpperCamelCase_ , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def a__ ( self : Any ) -> Dict: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class A( unittest.TestCase ): '''simple docstring''' def a__ ( self : Optional[int] ) -> Dict: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self : List[str] ) -> List[str]: """simple docstring""" lowerCamelCase_ = torch.manual_seed(0 ) lowerCamelCase_ = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' ) pipe.to('cuda' ) lowerCamelCase_ = ['vase', 'umbrella', 'white 
shark', 'white wolf'] lowerCamelCase_ = pipe.get_label_ids(UpperCamelCase_ ) lowerCamelCase_ = pipe(UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=40 , output_type='np' ).images for word, image in zip(UpperCamelCase_ , UpperCamelCase_ ): lowerCamelCase_ = load_numpy( f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" ) assert np.abs((expected_image - image).max() ) < 1E-2 def a__ ( self : Optional[Any] ) -> int: """simple docstring""" lowerCamelCase_ = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' ) lowerCamelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('cuda' ) lowerCamelCase_ = ['vase', 'umbrella'] lowerCamelCase_ = pipe.get_label_ids(UpperCamelCase_ ) lowerCamelCase_ = torch.manual_seed(0 ) lowerCamelCase_ = pipe(UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=25 , output_type='np' ).images for word, image in zip(UpperCamelCase_ , UpperCamelCase_ ): lowerCamelCase_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' f"""/dit/{word}_512.npy""" ) assert np.abs((expected_image - image).max() ) < 1E-1
704
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedy activity selection; assumes activities are sorted by finish time."""
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
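# --- Illustrative variant (not part of the original file) ---
# The same greedy rule, returning the chosen indices instead of printing;
# like the original, it assumes activities are already sorted by finish time.
def max_activities(start: list[int], finish: list[int]) -> list[int]:
    selected = [0]
    i = 0
    for j in range(1, len(finish)):
        if start[j] >= finish[i]:
            selected.append(j)
            i = j
    return selected


assert max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]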
# limitations under the License.
from typing import Optional, Tuple, Union

import torch

from diffusers import DiffusionPipeline, ImagePipelineOutput


class A(DiffusionPipeline):
    """simple docstring"""

    def __init__(self, unet, scheduler) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample Gaussian noise to begin the loop
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
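# --- Hypothetical usage (not part of the original file) ---
# Wiring the local pipeline above to a tiny UNet and scheduler; the model
# sizes and step count here are made up purely for illustration.
from diffusers import DDPMScheduler, UNet2DModel

unet = UNet2DModel(sample_size=8, in_channels=3, out_channels=3)
scheduler = DDPMScheduler(num_train_timesteps=10)
pipe = A(unet=unet, scheduler=scheduler)
output, message = pipe(batch_size=1, num_inference_steps=2)
print(message)  # "This is a local test"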
import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A: '''simple docstring''' def __init__( self : Optional[Any] , A_ : Union[str, Any] , A_ : str=13 , A_ : List[Any]=32 , A_ : Tuple=2 , A_ : Dict=3 , A_ : Union[str, Any]=16 , A_ : List[str]=[32, 64, 128] , A_ : Optional[Any]=[1, 2, 1] , A_ : Tuple=[2, 2, 4] , A_ : Dict=2 , A_ : Optional[Any]=2.0 , A_ : List[str]=True , A_ : Dict=0.0 , A_ : List[str]=0.0 , A_ : Optional[int]=0.1 , A_ : str="gelu" , A_ : Optional[Any]=False , A_ : Any=True , A_ : Optional[Any]=0.02 , A_ : Dict=1E-5 , A_ : int=True , A_ : Optional[int]=None , A_ : List[str]=True , A_ : Tuple=10 , A_ : Any=8 , A_ : Dict=["stage1", "stage2"] , A_ : Optional[Any]=[1, 2] , ) -> List[str]: """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = image_size lowerCamelCase_ = patch_size lowerCamelCase_ = num_channels lowerCamelCase_ = embed_dim lowerCamelCase_ = hidden_sizes lowerCamelCase_ = depths lowerCamelCase_ = num_heads lowerCamelCase_ = window_size lowerCamelCase_ = mlp_ratio lowerCamelCase_ = qkv_bias lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = drop_path_rate lowerCamelCase_ = hidden_act lowerCamelCase_ = use_absolute_embeddings lowerCamelCase_ = patch_norm lowerCamelCase_ = layer_norm_eps lowerCamelCase_ = initializer_range lowerCamelCase_ = is_training lowerCamelCase_ = scope lowerCamelCase_ = use_labels lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = encoder_stride lowerCamelCase_ = out_features lowerCamelCase_ = out_indices def a__ ( self : List[str] ) -> List[Any]: """simple docstring""" lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = self.get_config() return config, pixel_values, labels def a__ ( self : List[Any] ) -> Any: """simple docstring""" return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , 
out_features=self.out_features , out_indices=self.out_indices , ) def a__ ( self : Union[str, Any] , A_ : Dict , A_ : int , A_ : Optional[int] ) -> List[str]: """simple docstring""" lowerCamelCase_ = FocalNetModel(config=A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ ) lowerCamelCase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCamelCase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def a__ ( self : Tuple , A_ : List[str] , A_ : Optional[int] , A_ : Optional[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ = FocalNetBackbone(config=A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None lowerCamelCase_ = None lowerCamelCase_ = FocalNetBackbone(config=A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def a__ ( self : int , A_ : Optional[Any] , A_ : Optional[int] , A_ : Any ) -> Any: """simple docstring""" lowerCamelCase_ = FocalNetForMaskedImageModeling(config=A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCamelCase_ = 1 lowerCamelCase_ = FocalNetForMaskedImageModeling(A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase_ = model(A_ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def a__ ( self : Tuple , A_ : List[Any] , A_ : int , A_ : Dict ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = self.type_sequence_label_size lowerCamelCase_ = FocalNetForImageClassification(A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase_ = 1 lowerCamelCase_ = FocalNetForImageClassification(A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase_ = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def a__ ( self : int ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs lowerCamelCase_ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, 
FocalNetBackbone, ) if is_torch_available() else () ) UpperCamelCase = ( {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification} if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def a__ ( self : List[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ = FocalNetModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=A_ , embed_dim=37 , has_text_modality=A_ ) def a__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a__ ( self : Any ) -> Optional[int]: """simple docstring""" return def a__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def a__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*A_ ) def a__ ( self : Dict ) -> int: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*A_ ) def a__ ( self : List[str] ) -> Any: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A_ ) @unittest.skip(reason='FocalNet does not use inputs_embeds' ) def a__ ( self : int ) -> int: """simple docstring""" pass @unittest.skip(reason='FocalNet does not use feedforward chunking' ) def a__ ( self : Tuple ) -> List[str]: """simple docstring""" pass def a__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowerCamelCase_ = model_class(A_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A_ , nn.Linear ) ) def a__ ( self : Any ) -> Optional[int]: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowerCamelCase_ = model_class(A_ ) lowerCamelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ = [*signature.parameters.keys()] lowerCamelCase_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , A_ ) def a__ ( self : int , A_ : List[Any] , A_ : int , A_ : Dict , A_ : Dict ) -> List[Any]: """simple docstring""" lowerCamelCase_ = model_class(A_ ) model.to(A_ ) model.eval() with torch.no_grad(): lowerCamelCase_ = model(**self._prepare_for_class(A_ , A_ ) ) lowerCamelCase_ = outputs.hidden_states lowerCamelCase_ = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(A_ ) , A_ ) # FocalNet has a different seq_length lowerCamelCase_ = ( config.patch_size if isinstance(config.patch_size , 
collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) lowerCamelCase_ = outputs.reshaped_hidden_states self.assertEqual(len(A_ ) , A_ ) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = reshaped_hidden_states[0].shape lowerCamelCase_ = ( reshaped_hidden_states[0].view(A_ , A_ , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def a__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: lowerCamelCase_ = True self.check_hidden_states_output(A_ , A_ , A_ , A_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ = True self.check_hidden_states_output(A_ , A_ , A_ , A_ ) def a__ ( self : List[str] ) -> Dict: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = 3 lowerCamelCase_ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCamelCase_ = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCamelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: lowerCamelCase_ = True self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ = True self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) ) @slow def a__ ( self : str ) -> Optional[Any]: """simple docstring""" for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = FocalNetModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def a__ ( self : List[Any] ) -> Tuple: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = _config_zero_init(A_ ) for model_class in self.all_model_classes: lowerCamelCase_ = model_class(config=A_ ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @require_vision @require_torch class A( unittest.TestCase ): '''simple docstring''' @cached_property def a__ ( self : List[str] ) -> Optional[int]: """simple docstring""" return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None @slow def a__ ( self : Tuple ) -> Any: """simple docstring""" lowerCamelCase_ = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(A_ ) lowerCamelCase_ = 
self.default_image_processor lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) lowerCamelCase_ = image_processor(images=A_ , return_tensors='pt' ).to(A_ ) # forward pass with torch.no_grad(): lowerCamelCase_ = model(**A_ ) # verify the logits lowerCamelCase_ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , A_ ) lowerCamelCase_ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(A_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 ) @require_torch class A( UpperCamelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase = (FocalNetBackbone,) if is_torch_available() else () UpperCamelCase = FocalNetConfig UpperCamelCase = False def a__ ( self : List[str] ) -> Tuple: """simple docstring""" lowerCamelCase_ = FocalNetModelTester(self )
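# --- Illustrative sketch (not part of the original file) ---
# Sequence-length arithmetic used by create_and_check_model above, with
# the tester defaults (image 32, patch 2, depths [1, 2, 1], embed_dim 16):
image_size, patch_size, depths, embed_dim = 32, 2, [1, 2, 1], 16
expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))
expected_dim = int(embed_dim * 2 ** (len(depths) - 1))
print(expected_seq_len, expected_dim)  # 16 64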
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated lowerCamelCase : int = collections.namedtuple("_Datasets", ["train", "validation", "test"]) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ lowerCamelCase : Optional[Any] = "https://storage.googleapis.com/cvdf-datasets/mnist/" def _SCREAMING_SNAKE_CASE ( lowercase : int ): '''simple docstring''' lowerCamelCase_ = numpy.dtype(numpy.uintaa ).newbyteorder('>' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=lowerCamelCase_ )[0] @deprecated(lowerCamelCase_ , 'Please use tf.data to implement this functionality.' ) def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int] ): '''simple docstring''' print('Extracting' , f.name ) with gzip.GzipFile(fileobj=lowerCamelCase_ ) as bytestream: lowerCamelCase_ = _readaa(lowerCamelCase_ ) if magic != 20_51: raise ValueError( 'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) ) lowerCamelCase_ = _readaa(lowerCamelCase_ ) lowerCamelCase_ = _readaa(lowerCamelCase_ ) lowerCamelCase_ = _readaa(lowerCamelCase_ ) lowerCamelCase_ = bytestream.read(rows * cols * num_images ) lowerCamelCase_ = numpy.frombuffer(lowerCamelCase_ , dtype=numpy.uinta ) lowerCamelCase_ = data.reshape(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , 1 ) return data @deprecated(lowerCamelCase_ , 'Please use tf.one_hot on tensors.' ) def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] , lowercase : int ): '''simple docstring''' lowerCamelCase_ = labels_dense.shape[0] lowerCamelCase_ = numpy.arange(lowerCamelCase_ ) * num_classes lowerCamelCase_ = numpy.zeros((num_labels, num_classes) ) lowerCamelCase_ = 1 return labels_one_hot @deprecated(lowerCamelCase_ , 'Please use tf.data to implement this functionality.' ) def _SCREAMING_SNAKE_CASE ( lowercase : Tuple , lowercase : Union[str, Any]=False , lowercase : Dict=10 ): '''simple docstring''' print('Extracting' , f.name ) with gzip.GzipFile(fileobj=lowerCamelCase_ ) as bytestream: lowerCamelCase_ = _readaa(lowerCamelCase_ ) if magic != 20_49: raise ValueError( 'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) ) lowerCamelCase_ = _readaa(lowerCamelCase_ ) lowerCamelCase_ = bytestream.read(lowerCamelCase_ ) lowerCamelCase_ = numpy.frombuffer(lowerCamelCase_ , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(lowerCamelCase_ , lowerCamelCase_ ) return labels class A: '''simple docstring''' @deprecated( UpperCAmelCase_ , 'Please use alternatives such as official/mnist/_DataSet.py' ' from tensorflow/models.' 
, ) def __init__( self : Dict , A_ : Optional[int] , A_ : Optional[int] , A_ : List[str]=False , A_ : Optional[int]=False , A_ : Tuple=dtypes.floataa , A_ : int=True , A_ : Union[str, Any]=None , ) -> Tuple: """simple docstring""" lowerCamelCase_ = random_seed.get_seed(UpperCAmelCase_ ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) lowerCamelCase_ = dtypes.as_dtype(UpperCAmelCase_ ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype ) if fake_data: lowerCamelCase_ = 10000 lowerCamelCase_ = one_hot else: assert ( images.shape[0] == labels.shape[0] ), f"""images.shape: {images.shape} labels.shape: {labels.shape}""" lowerCamelCase_ = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 lowerCamelCase_ = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. lowerCamelCase_ = images.astype(numpy.floataa ) lowerCamelCase_ = numpy.multiply(UpperCAmelCase_ , 1.0 / 255.0 ) lowerCamelCase_ = images lowerCamelCase_ = labels lowerCamelCase_ = 0 lowerCamelCase_ = 0 @property def a__ ( self : List[Any] ) -> str: """simple docstring""" return self._images @property def a__ ( self : List[Any] ) -> List[str]: """simple docstring""" return self._labels @property def a__ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" return self._num_examples @property def a__ ( self : Dict ) -> Any: """simple docstring""" return self._epochs_completed def a__ ( self : Optional[int] , A_ : int , A_ : List[Any]=False , A_ : Dict=True ) -> List[Any]: """simple docstring""" if fake_data: lowerCamelCase_ = [1] * 784 lowerCamelCase_ = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(UpperCAmelCase_ )], [fake_label for _ in range(UpperCAmelCase_ )], ) lowerCamelCase_ = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: lowerCamelCase_ = numpy.arange(self._num_examples ) numpy.random.shuffle(UpperCAmelCase_ ) lowerCamelCase_ = self.images[perma] lowerCamelCase_ = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch lowerCamelCase_ = self._num_examples - start lowerCamelCase_ = self._images[start : self._num_examples] lowerCamelCase_ = self._labels[start : self._num_examples] # Shuffle the data if shuffle: lowerCamelCase_ = numpy.arange(self._num_examples ) numpy.random.shuffle(UpperCAmelCase_ ) lowerCamelCase_ = self.images[perm] lowerCamelCase_ = self.labels[perm] # Start next epoch lowerCamelCase_ = 0 lowerCamelCase_ = batch_size - rest_num_examples lowerCamelCase_ = self._index_in_epoch lowerCamelCase_ = self._images[start:end] lowerCamelCase_ = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size lowerCamelCase_ = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(lowerCamelCase_ , 'Please write your own downloading logic.' 
) def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Dict , lowercase : int ): '''simple docstring''' if not gfile.Exists(lowerCamelCase_ ): gfile.MakeDirs(lowerCamelCase_ ) lowerCamelCase_ = os.path.join(lowerCamelCase_ , lowerCamelCase_ ) if not gfile.Exists(lowerCamelCase_ ): urllib.request.urlretrieve(lowerCamelCase_ , lowerCamelCase_ ) # noqa: S310 with gfile.GFile(lowerCamelCase_ ) as f: lowerCamelCase_ = f.size() print('Successfully downloaded' , lowerCamelCase_ , lowerCamelCase_ , 'bytes.' ) return filepath @deprecated( lowerCamelCase_ , 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' ) def _SCREAMING_SNAKE_CASE ( lowercase : List[Any] , lowercase : str=False , lowercase : Union[str, Any]=False , lowercase : Union[str, Any]=dtypes.floataa , lowercase : Optional[int]=True , lowercase : Dict=50_00 , lowercase : Any=None , lowercase : Tuple=DEFAULT_SOURCE_URL , ): '''simple docstring''' if fake_data: def fake(): return _DataSet( [] , [] , fake_data=lowerCamelCase_ , one_hot=lowerCamelCase_ , dtype=lowerCamelCase_ , seed=lowerCamelCase_ ) lowerCamelCase_ = fake() lowerCamelCase_ = fake() lowerCamelCase_ = fake() return _Datasets(train=lowerCamelCase_ , validation=lowerCamelCase_ , test=lowerCamelCase_ ) if not source_url: # empty string check lowerCamelCase_ = DEFAULT_SOURCE_URL lowerCamelCase_ = 'train-images-idx3-ubyte.gz' lowerCamelCase_ = 'train-labels-idx1-ubyte.gz' lowerCamelCase_ = 't10k-images-idx3-ubyte.gz' lowerCamelCase_ = 't10k-labels-idx1-ubyte.gz' lowerCamelCase_ = _maybe_download( lowerCamelCase_ , lowerCamelCase_ , source_url + train_images_file ) with gfile.Open(lowerCamelCase_ , 'rb' ) as f: lowerCamelCase_ = _extract_images(lowerCamelCase_ ) lowerCamelCase_ = _maybe_download( lowerCamelCase_ , lowerCamelCase_ , source_url + train_labels_file ) with gfile.Open(lowerCamelCase_ , 'rb' ) as f: lowerCamelCase_ = _extract_labels(lowerCamelCase_ , one_hot=lowerCamelCase_ ) lowerCamelCase_ = _maybe_download( lowerCamelCase_ , lowerCamelCase_ , source_url + test_images_file ) with gfile.Open(lowerCamelCase_ , 'rb' ) as f: lowerCamelCase_ = _extract_images(lowerCamelCase_ ) lowerCamelCase_ = _maybe_download( lowerCamelCase_ , lowerCamelCase_ , source_url + test_labels_file ) with gfile.Open(lowerCamelCase_ , 'rb' ) as f: lowerCamelCase_ = _extract_labels(lowerCamelCase_ , one_hot=lowerCamelCase_ ) if not 0 <= validation_size <= len(lowerCamelCase_ ): lowerCamelCase_ = ( 'Validation size should be between 0 and ' f"""{len(lowerCamelCase_ )}. Received: {validation_size}.""" ) raise ValueError(lowerCamelCase_ ) lowerCamelCase_ = train_images[:validation_size] lowerCamelCase_ = train_labels[:validation_size] lowerCamelCase_ = train_images[validation_size:] lowerCamelCase_ = train_labels[validation_size:] lowerCamelCase_ = {'dtype': dtype, 'reshape': reshape, 'seed': seed} lowerCamelCase_ = _DataSet(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) lowerCamelCase_ = _DataSet(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) lowerCamelCase_ = _DataSet(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) return _Datasets(train=lowerCamelCase_ , validation=lowerCamelCase_ , test=lowerCamelCase_ )
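# --- Illustrative sketch (not part of the original file) ---
# The dense -> one-hot conversion above, written out standalone; note the
# flat-index assignment, which the extracted version of _dense_to_one_hot
# appears to have dropped:
import numpy

labels_dense = numpy.array([0, 2, 1])
num_classes = 3
index_offset = numpy.arange(labels_dense.shape[0]) * num_classes
labels_one_hot = numpy.zeros((labels_dense.shape[0], num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
print(labels_one_hot)  # rows: [1,0,0], [0,0,1], [0,1,0]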
import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class A( unittest.TestCase ): '''simple docstring''' UpperCamelCase = MODEL_FOR_CAUSAL_LM_MAPPING UpperCamelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' ) # Using `do_sample=False` to force deterministic output lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ ) self.assertEqual( A_ , [ { 'generated_text': ( 'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.' ' oscope. FiliFili@@' ) } ] , ) lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] ) self.assertEqual( A_ , [ [ { 'generated_text': ( 'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.' ' oscope. FiliFili@@' ) } ], [ { 'generated_text': ( 'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy' ' oscope. oscope. FiliFili@@' ) } ], ] , ) lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ , num_return_sequences=2 , return_tensors=A_ ) self.assertEqual( A_ , [ {'generated_token_ids': ANY(A_ )}, {'generated_token_ids': ANY(A_ )}, ] , ) lowerCamelCase_ = text_generator.model.config.eos_token_id lowerCamelCase_ = '<pad>' lowerCamelCase_ = text_generator( ['This is a test', 'This is a second test'] , do_sample=A_ , num_return_sequences=2 , batch_size=2 , return_tensors=A_ , ) self.assertEqual( A_ , [ [ {'generated_token_ids': ANY(A_ )}, {'generated_token_ids': ANY(A_ )}, ], [ {'generated_token_ids': ANY(A_ )}, {'generated_token_ids': ANY(A_ )}, ], ] , ) @require_tf def a__ ( self : Optional[int] ) -> str: """simple docstring""" lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' ) # Using `do_sample=False` to force deterministic output lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ ) self.assertEqual( A_ , [ { 'generated_text': ( 'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵' ' please,' ) } ] , ) lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] , do_sample=A_ ) self.assertEqual( A_ , [ [ { 'generated_text': ( 'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵' ' please,' ) } ], [ { 'generated_text': ( 'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes' ' Cannes 閲閲Cannes Cannes Cannes 攵 please,' ) } ], ] , ) def a__ ( self : Optional[int] , A_ : Dict , A_ : int , A_ : List[str] ) -> str: """simple docstring""" lowerCamelCase_ = TextGenerationPipeline(model=A_ , tokenizer=A_ ) return text_generator, ["This is a test", "Another test"] def a__ ( self : Dict ) -> str: """simple docstring""" lowerCamelCase_ = 'Hello I believe in' lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' ) lowerCamelCase_ = text_generator(A_ ) self.assertEqual( A_ , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , ) lowerCamelCase_ = text_generator(A_ , stop_sequence=' fe' ) 
self.assertEqual(A_ , [{'generated_text': 'Hello I believe in fe'}] ) def a__ ( self : Any , A_ : Optional[Any] , A_ : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = text_generator.model lowerCamelCase_ = text_generator.tokenizer lowerCamelCase_ = text_generator('This is a test' ) self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] ) self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) ) lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ ) self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] ) self.assertNotIn('This is a test' , outputs[0]['generated_text'] ) lowerCamelCase_ = pipeline(task='text-generation' , model=A_ , tokenizer=A_ , return_full_text=A_ ) lowerCamelCase_ = text_generator('This is a test' ) self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] ) self.assertNotIn('This is a test' , outputs[0]['generated_text'] ) lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ ) self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] ) self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) ) lowerCamelCase_ = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=A_ ) self.assertEqual( A_ , [ [{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}], [{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}], ] , ) if text_generator.tokenizer.pad_token is not None: lowerCamelCase_ = text_generator( ['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=A_ ) self.assertEqual( A_ , [ [{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}], [{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}], ] , ) with self.assertRaises(A_ ): lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_text=A_ ) with self.assertRaises(A_ ): lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_tensors=A_ ) with self.assertRaises(A_ ): lowerCamelCase_ = text_generator('test' , return_text=A_ , return_tensors=A_ ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): lowerCamelCase_ = text_generator('' ) self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] ) else: with self.assertRaises((ValueError, AssertionError) ): lowerCamelCase_ = text_generator('' ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. 
lowerCamelCase_ = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM'] if ( tokenizer.model_max_length < 10000 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator('This is a test' * 500 , max_new_tokens=20 ) lowerCamelCase_ = text_generator('This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(A_ ): text_generator( 'This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def a__ ( self : Union[str, Any] ) -> Any: """simple docstring""" import torch # Classic `model_kwargs` lowerCamelCase_ = pipeline( model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCamelCase_ = pipe('This is a test' ) self.assertEqual( A_ , [ { 'generated_text': ( 'This is a test test test test test test test test test test test test test test test test' ' test' ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCamelCase_ = pipe('This is a test' ) self.assertEqual( A_ , [ { 'generated_text': ( 'This is a test test test test test test test test test test test test test test test test' ' test' ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) lowerCamelCase_ = pipe('This is a test' ) self.assertEqual( A_ , [ { 'generated_text': ( 'This is a test test test test test test test test test test test test test test test test' ' test' ) } ] , ) @require_torch @require_torch_gpu def a__ ( self : int ) -> str: """simple docstring""" import torch lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.floataa ) pipe('This is a test' ) @require_torch @require_accelerate @require_torch_gpu def a__ ( self : List[Any] ) -> Dict: """simple docstring""" import torch lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.floataa ) pipe('This is a test' , do_sample=A_ , top_p=0.5 ) def a__ ( self : Tuple ) -> Dict: """simple docstring""" lowerCamelCase_ = 'Hello world' lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' ) if text_generator.model.framework == "tf": lowerCamelCase_ = logging.get_logger('transformers.generation.tf_utils' ) else: lowerCamelCase_ = logging.get_logger('transformers.generation.utils' ) lowerCamelCase_ = 'Both `max_new_tokens`' # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(A_ ) as cl: lowerCamelCase_ = text_generator(A_ , 
max_length=10 , max_new_tokens=1 ) self.assertIn(A_ , cl.out ) # The user only sets one -> no warning with CaptureLogger(A_ ) as cl: lowerCamelCase_ = text_generator(A_ , max_new_tokens=1 ) self.assertNotIn(A_ , cl.out ) with CaptureLogger(A_ ) as cl: lowerCamelCase_ = text_generator(A_ , max_length=10 ) self.assertNotIn(A_ , cl.out )
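The cell above exercises `TextGenerationPipeline`, including the warning logged when both `max_length` and `max_new_tokens` are passed. A minimal standalone sketch of that usage, with the tiny test checkpoint named in the tests themselves; the generated text is model- and version-dependent, so the exact output is not asserted here.

from transformers import pipeline

generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")

# Setting only max_new_tokens avoids the "Both `max_new_tokens`..." warning
# that the tests capture with CaptureLogger when max_length is given as well.
out = generator("Hello I believe in", do_sample=False, max_new_tokens=5)
print(out[0]["generated_text"])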
651
0
from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING lowerCamelCase : Optional[Any] = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase_ ) class A( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : Dict , *A_ : List[Any] , **A_ : Dict ) -> List[Any]: """simple docstring""" super().__init__(*_lowercase , **_lowercase ) requires_backends(self , 'vision' ) self.check_model_type(_lowercase ) def __call__( self : Any , A_ : Union[str, Any] , **A_ : int ) -> int: """simple docstring""" return super().__call__(_lowercase , **_lowercase ) def a__ ( self : str , **A_ : List[str] ) -> Optional[int]: """simple docstring""" return {}, {}, {} def a__ ( self : str , A_ : Optional[int] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = load_image(_lowercase ) lowerCamelCase_ = image.size lowerCamelCase_ = self.image_processor(images=_lowercase , return_tensors=self.framework ) return model_inputs def a__ ( self : str , A_ : Dict ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = self.model(**_lowercase ) return model_outputs def a__ ( self : Optional[Any] , A_ : Optional[Any] ) -> Tuple: """simple docstring""" lowerCamelCase_ = model_outputs.predicted_depth lowerCamelCase_ = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='bicubic' , align_corners=_lowercase ) lowerCamelCase_ = prediction.squeeze().cpu().numpy() lowerCamelCase_ = (output * 255 / np.max(_lowercase )).astype('uint8' ) lowerCamelCase_ = Image.fromarray(_lowercase ) lowerCamelCase_ = {} lowerCamelCase_ = predicted_depth lowerCamelCase_ = depth return output_dict
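The cell above is a mangled `DepthEstimationPipeline`: preprocess loads the image and records its size, forward runs the model, and postprocess interpolates `predicted_depth` back to the input size and rescales it into a uint8 PIL image. A usage sketch follows; the checkpoint name is an assumption (any depth-estimation checkpoint should behave the same), while the two output keys are the ones built by the cell's postprocess step.

from transformers import pipeline

depth = pipeline("depth-estimation", model="Intel/dpt-large")  # assumed public checkpoint
result = depth("http://images.cocodataset.org/val2017/000000039769.jpg")

result["depth"].save("depth.png")        # PIL image assembled in postprocess()
print(result["predicted_depth"].shape)   # raw torch tensor before rescaling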
707
import os import re import shutil import sys import tempfile import unittest import black lowerCamelCase : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. lowerCamelCase : Tuple = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n" class A( unittest.TestCase ): '''simple docstring''' def a__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" lowerCamelCase_ = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) ) lowerCamelCase_ = self.diffusers_dir shutil.copy( os.path.join(A_ , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , ) def a__ ( self : Tuple ) -> List[str]: """simple docstring""" lowerCamelCase_ = 'src/diffusers' shutil.rmtree(self.diffusers_dir ) def a__ ( self : str , A_ : Optional[Any] , A_ : Optional[int] , A_ : str , A_ : Optional[Any]=None ) -> int: """simple docstring""" lowerCamelCase_ = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: lowerCamelCase_ = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result lowerCamelCase_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) lowerCamelCase_ = black.format_str(A_ , mode=A_ ) lowerCamelCase_ = os.path.join(self.diffusers_dir , 'new_code.py' ) with open(A_ , 'w' , newline='\n' ) as f: f.write(A_ ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(A_ ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=A_ ) with open(A_ , 'r' ) as f: self.assertTrue(f.read() , A_ ) def a__ ( self : Optional[int] ) -> Dict: """simple docstring""" lowerCamelCase_ = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' ) self.assertEqual(A_ , A_ ) def a__ ( self : Any ) -> Dict: """simple docstring""" self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , ) # With no empty line at the end self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , A_ , ) # Copy consistency with rename self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , A_ ) , ) # Copy consistency with a really long name lowerCamelCase_ = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason' self.check_copy_consistency( f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with 
DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('Bert' , A_ , A_ ) , ) # Copy consistency with overwrite self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , A_ , overwrite_result=re.sub('DDPM' , 'Test' , A_ ) , )
651
0
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class A( unittest.TestCase ): '''simple docstring''' def a__ ( self : List[Any] , A_ : Any ) -> Dict: """simple docstring""" for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ): lowerCamelCase_ = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(A_ ) def a__ ( self : List[Any] ) -> int: """simple docstring""" lowerCamelCase_ = '''sshleifer/tiny-gpt2''' lowerCamelCase_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=A_ , multi_process=A_ , ) lowerCamelCase_ = TensorFlowBenchmark(A_ ) lowerCamelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a__ ( self : List[str] ) -> int: """simple docstring""" lowerCamelCase_ = '''sgugger/tiny-distilbert-classification''' lowerCamelCase_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , only_pretrain_model=A_ , ) lowerCamelCase_ = TensorFlowBenchmark(A_ ) lowerCamelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a__ ( self : Dict ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = '''sshleifer/tiny-gpt2''' lowerCamelCase_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , ) lowerCamelCase_ = TensorFlowBenchmark(A_ ) lowerCamelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a__ ( self : Dict ) -> str: """simple docstring""" lowerCamelCase_ = '''sshleifer/tiny-gpt2''' lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) lowerCamelCase_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=A_ , multi_process=A_ , ) lowerCamelCase_ = TensorFlowBenchmark(A_ , [config] ) lowerCamelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a__ ( self : Any ) -> Dict: """simple docstring""" lowerCamelCase_ = '''sshleifer/tiny-gpt2''' lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) lowerCamelCase_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , ) lowerCamelCase_ = TensorFlowBenchmark(A_ , [config] ) lowerCamelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a__ ( self : Any ) -> Tuple: """simple docstring""" lowerCamelCase_ = '''sshleifer/tiny-gpt2''' lowerCamelCase_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , ) lowerCamelCase_ = TensorFlowBenchmark(A_ ) lowerCamelCase_ = benchmark.run() 
self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def a__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = '''sshleifer/tiny-gpt2''' lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) lowerCamelCase_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , ) lowerCamelCase_ = TensorFlowBenchmark(A_ , [config] ) lowerCamelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def a__ ( self : str ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = '''patrickvonplaten/t5-tiny-random''' lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) lowerCamelCase_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , ) lowerCamelCase_ = TensorFlowBenchmark(A_ , configs=[config] ) lowerCamelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' ) def a__ ( self : int ) -> Dict: """simple docstring""" lowerCamelCase_ = '''sshleifer/tiny-gpt2''' lowerCamelCase_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=A_ , multi_process=A_ , ) lowerCamelCase_ = TensorFlowBenchmark(A_ ) lowerCamelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a__ ( self : Tuple ) -> Any: """simple docstring""" lowerCamelCase_ = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=A_ , save_to_csv=A_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A_ , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(A_ , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(A_ , 'env.csv' ) , multi_process=A_ , ) lowerCamelCase_ = TensorFlowBenchmark(A_ ) benchmark.run() self.assertTrue(Path(os.path.join(A_ , 'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(A_ , 'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(A_ , 'env.csv' ) ).exists() ) def a__ ( self : List[str] ) -> Tuple: """simple docstring""" lowerCamelCase_ = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(A_ : List[Any] ): self.assertTrue(hasattr(A_ , 'sequential' ) ) self.assertTrue(hasattr(A_ , 'cumulative' ) ) self.assertTrue(hasattr(A_ , 'current' ) ) self.assertTrue(hasattr(A_ , 'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A_ , 'log.txt' ) , log_print=A_ , trace_memory_line_by_line=A_ , eager_mode=A_ , multi_process=A_ , ) lowerCamelCase_ = TensorFlowBenchmark(A_ ) lowerCamelCase_ = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(A_ , 'log.txt' ) ).exists() )
708
import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class A( unittest.TestCase ): '''simple docstring''' def a__ ( self : Optional[int] , A_ : Tuple , A_ : str , A_ : int ) -> Any: """simple docstring""" self.assertEqual(len(A_ ) , len(A_ ) ) for a, b in zip(A_ , A_ ): self.assertAlmostEqual(A_ , A_ , delta=A_ ) def a__ ( self : int ) -> str: """simple docstring""" lowerCamelCase_ = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(A_ ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 ) def a__ ( self : List[Any] ) -> List[Any]: """simple docstring""" lowerCamelCase_ = None ops.enable_eager_execution_internal() lowerCamelCase_ = tf.config.list_physical_devices('CPU' ) if len(A_ ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) lowerCamelCase_ = tf.config.list_logical_devices(device_type='CPU' ) lowerCamelCase_ = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): lowerCamelCase_ = GradientAccumulator() lowerCamelCase_ = tf.Variable([4.0, 3.0] ) lowerCamelCase_ , lowerCamelCase_ = create_optimizer(5E-5 , 10 , 5 ) lowerCamelCase_ = tf.Variable([0.0, 0.0] , trainable=A_ ) def accumulate_on_replica(A_ : Any ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(A_ : List[Any] , A_ : Tuple ): with strategy.scope(): lowerCamelCase_ = strategy.experimental_local_results(A_ ) local_variables[0].assign(A_ ) local_variables[1].assign(A_ ) strategy.run(A_ , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(A_ ) def _check_local_values(A_ : List[Any] , A_ : str ): lowerCamelCase_ = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , A_ , tol=1E-2 ) self.assertListAlmostEqual(values[1].value() , A_ , tol=1E-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
651
0
import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def _SCREAMING_SNAKE_CASE ( lowercase : List[Any] ): # picklable for multiprocessing '''simple docstring''' return x.sum() def _SCREAMING_SNAKE_CASE ( lowercase : int ): # picklable for multiprocessing '''simple docstring''' return i + 1 @dataclass class A: '''simple docstring''' UpperCamelCase = 42 UpperCamelCase = 42 class A( UpperCamelCase ): '''simple docstring''' def a__ ( self : str ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = {} lowerCamelCase_ = [] lowerCamelCase_ = 1 lowerCamelCase_ = [1, 2] lowerCamelCase_ = {'a': 1, 'b': 2} lowerCamelCase_ = {'a': [1, 2], 'b': [3, 4]} lowerCamelCase_ = {'a': {'1': 1}, 'b': 2} lowerCamelCase_ = {'a': 1, 'b': 2, 'c': 3, 'd': 4} lowerCamelCase_ = {} lowerCamelCase_ = [] lowerCamelCase_ = 2 lowerCamelCase_ = [2, 3] lowerCamelCase_ = {'a': 2, 'b': 3} lowerCamelCase_ = {'a': [2, 3], 'b': [4, 5]} lowerCamelCase_ = {'a': {'1': 2}, 'b': 3} lowerCamelCase_ = {'a': 2, 'b': 3, 'c': 4, 'd': 5} self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ) self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ) self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ) self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ) self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ) self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ) self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ) self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ) lowerCamelCase_ = 2 self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase ) self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase ) self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase ) self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase ) self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase ) self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase ) self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase ) self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase ) lowerCamelCase_ = {'a': np.eye(2 ), 'b': np.zeros(3 ), 'c': np.ones(2 )} lowerCamelCase_ = {'a': 2, 'b': 0, 'c': 2} lowerCamelCase_ = { 'a': np.eye(2 ).astype(_lowerCAmelCase ), 'b': np.zeros(3 ).astype(_lowerCAmelCase ), 'c': np.ones(2 ).astype(_lowerCAmelCase ), } self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase ) , _lowerCAmelCase ) self.assertEqual( {k: v.tolist() for k, v in map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(_lowerCAmelCase , 
_lowerCAmelCase , map_numpy=_lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase ) self.assertEqual( {k: v.tolist() for k, v in map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase , num_proc=_lowerCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(_lowerCAmelCase ): # can't pickle a local lambda map_nested(lambda A_ : x + 1 , _lowerCAmelCase , num_proc=_lowerCAmelCase ) def a__ ( self : int ) -> Dict: """simple docstring""" lowerCamelCase_ = {'a': 1, 'b': 2} lowerCamelCase_ = {'a': 3, 'b': 4} lowerCamelCase_ = {'a': 5, 'b': 6} lowerCamelCase_ = sorted([('a', (1, 3, 5)), ('b', (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ) , _lowerCAmelCase ) def a__ ( self : str ) -> List[str]: """simple docstring""" class A: '''simple docstring''' UpperCamelCase = '''bar''' lowerCamelCase_ = Foo() self.assertEqual(foo.my_attr , 'bar' ) with temporary_assignment(_lowerCAmelCase , 'my_attr' , 'BAR' ): self.assertEqual(foo.my_attr , 'BAR' ) self.assertEqual(foo.my_attr , 'bar' ) @pytest.mark.parametrize( 'iterable_length, num_proc, expected_num_proc' , [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ] , ) def _SCREAMING_SNAKE_CASE ( lowercase : Any , lowercase : Tuple , lowercase : int ): '''simple docstring''' with patch('datasets.utils.py_utils._single_map_nested' ) as mock_single_map_nested, patch( 'datasets.parallel.parallel.Pool' ) as mock_multiprocessing_pool: lowerCamelCase_ = {f"""{i}""": i for i in range(__lowerCAmelCase )} lowerCamelCase_ = map_nested(lambda lowercase : x + 10 , __lowerCAmelCase , num_proc=__lowerCAmelCase , parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class A( UpperCamelCase ): '''simple docstring''' @require_tf def a__ ( self : Optional[int] ) -> int: """simple docstring""" import tensorflow as tf from tensorflow.keras import layers lowerCamelCase_ = layers.Dense(2 ) def gen_random_output(): lowerCamelCase_ = tf.random.uniform((1, 3) ) return model(_lowerCAmelCase ).numpy() with temp_seed(42 , set_tensorflow=_lowerCAmelCase ): lowerCamelCase_ = gen_random_output() with temp_seed(42 , set_tensorflow=_lowerCAmelCase ): lowerCamelCase_ = gen_random_output() lowerCamelCase_ = gen_random_output() np.testing.assert_equal(_lowerCAmelCase , _lowerCAmelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def a__ ( self : Any ) -> int: """simple docstring""" import torch def gen_random_output(): lowerCamelCase_ = torch.nn.Linear(3 , 2 ) lowerCamelCase_ = torch.rand(1 , 3 ) return model(_lowerCAmelCase ).detach().numpy() with temp_seed(42 , set_pytorch=_lowerCAmelCase ): lowerCamelCase_ = gen_random_output() with temp_seed(42 , set_pytorch=_lowerCAmelCase ): lowerCamelCase_ = gen_random_output() lowerCamelCase_ = gen_random_output() np.testing.assert_equal(_lowerCAmelCase , _lowerCAmelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def a__ ( self : str ) -> List[Any]: """simple docstring""" def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(42 ): lowerCamelCase_ = gen_random_output() with temp_seed(42 ): lowerCamelCase_ = gen_random_output() lowerCamelCase_ = 
gen_random_output() np.testing.assert_equal(_lowerCAmelCase , _lowerCAmelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize('input_data' , [{}] ) def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int] ): '''simple docstring''' lowerCamelCase_ = NestedDataStructure(__lowerCAmelCase ).data assert output_data == input_data @pytest.mark.parametrize( 'data, expected_output' , [ ({}, []), ([], []), ('foo', ['foo']), (['foo', 'bar'], ['foo', 'bar']), ([['foo', 'bar']], ['foo', 'bar']), ([[['foo'], ['bar']]], ['foo', 'bar']), ([[['foo'], 'bar']], ['foo', 'bar']), ({'a': 1, 'b': 2}, [1, 2]), ({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]), ({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]), ({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]), ({'a': {'1': 1}, 'b': 2}, [1, 2]), ({'a': {'1': [1]}, 'b': 2}, [1, 2]), ({'a': {'1': [1]}, 'b': [2]}, [1, 2]), ] , ) def _SCREAMING_SNAKE_CASE ( lowercase : List[Any] , lowercase : List[str] ): '''simple docstring''' lowerCamelCase_ = NestedDataStructure(__lowerCAmelCase ).flatten() assert output == expected_output def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = A(x=1 , y='foobar' ) lowerCamelCase_ = {'x': 1, 'y': 'foobar'} assert asdict(__lowerCAmelCase ) == expected_output lowerCamelCase_ = {'a': {'b': A(x=10 , y='foo' )}, 'c': [A(x=20 , y='bar' )]} lowerCamelCase_ = {'a': {'b': {'x': 10, 'y': 'foo'}}, 'c': [{'x': 20, 'y': 'bar'}]} assert asdict(__lowerCAmelCase ) == expected_output with pytest.raises(__lowerCAmelCase ): asdict([1, A(x=10 , y='foo' )] ) def _SCREAMING_SNAKE_CASE ( lowercase : str ): '''simple docstring''' return text.split() def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] ): '''simple docstring''' yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' with Pool(2 ) as pool: lowerCamelCase_ = list(iflatmap_unordered(__lowerCAmelCase , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 10 ) ) assert out.count('hello' ) == 10 assert out.count('there' ) == 10 assert len(__lowerCAmelCase ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: lowerCamelCase_ = list(iflatmap_unordered(__lowerCAmelCase , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 10 ) ) assert out.count('hello' ) == 10 assert out.count('there' ) == 10 assert len(__lowerCAmelCase ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: lowerCamelCase_ = [] for yield_time, content in iflatmap_unordered( __lowerCAmelCase , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'content': 'a'}, {'content': 'b'}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(__lowerCAmelCase ) assert out.count('a' ) == 2 assert out.count('b' ) == 2 assert len(__lowerCAmelCase ) == 4
709
import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs lowerCamelCase : str = imread(r"digital_image_processing/image_data/lena_small.jpg") lowerCamelCase : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY) def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = cn.convert_to_negative(lowercase ) # assert negative_img array for at least one True assert negative_img.any() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img: # Work around assertion for response assert str(cc.change_contrast(lowercase , 1_10 ) ).startswith( '<PIL.Image.Image image mode=RGB size=100x100 at' ) def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = imread('digital_image_processing/image_data/lena_small.jpg' , 0 ) # assert ambiguous array for all == True assert canny_img.all() lowerCamelCase_ = canny.canny(lowercase ) # assert canny array for at least one True assert canny_array.any() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' assert gg.gaussian_filter(lowercase , 5 , sigma=0.9 ).all() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) lowerCamelCase_ = conv.img_convolve(lowercase , lowercase ).astype(lowercase ) assert res.any() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' assert med.median_filter(lowercase , 3 ).any() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ , lowerCamelCase_ = sob.sobel_filter(lowercase ) assert grad.any() and theta.any() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = sp.make_sepia(lowercase , 20 ) assert sepia.all() def _SCREAMING_SNAKE_CASE ( lowercase : str = "digital_image_processing/image_data/lena_small.jpg" ): '''simple docstring''' lowerCamelCase_ = bs.Burkes(imread(lowercase , 1 ) , 1_20 ) burkes.process() assert burkes.output_img.any() def _SCREAMING_SNAKE_CASE ( lowercase : str = "digital_image_processing/image_data/lena_small.jpg" , ): '''simple docstring''' lowerCamelCase_ = rs.NearestNeighbour(imread(lowercase , 1 ) , 4_00 , 2_00 ) nn.process() assert nn.output.any() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = 'digital_image_processing/image_data/lena.jpg' # Reading the image and converting it to grayscale. 
lowerCamelCase_ = imread(lowercase , 0 ) # Test for get_neighbors_pixel function() return not None lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = image[x_coordinate][y_coordinate] lowerCamelCase_ = lbp.get_neighbors_pixel( lowercase , lowercase , lowercase , lowercase ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image lowerCamelCase_ = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): lowerCamelCase_ = lbp.local_binary_value(lowercase , lowercase , lowercase ) assert lbp_image.any()
651
0
from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging lowerCamelCase : List[Any] = logging.get_logger(__name__) def _SCREAMING_SNAKE_CASE ( lowercase : List[Any] ): '''simple docstring''' if isinstance(lowerCamelCase__ , np.ndarray ): return list(tensor.shape ) lowerCamelCase_ = tf.shape(lowerCamelCase__ ) if tensor.shape == tf.TensorShape(lowerCamelCase__ ): return dynamic lowerCamelCase_ = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(lowerCamelCase__ )] def _SCREAMING_SNAKE_CASE ( lowercase : List[str] , lowercase : Optional[Any] = None , lowercase : List[str] = None ): '''simple docstring''' return tf.nn.softmax(logits=logits + 1e-9 , axis=lowerCamelCase__ , name=lowerCamelCase__ ) def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : Dict , lowercase : Tuple , lowercase : Dict=1e-5 , lowercase : Any=-1 ): '''simple docstring''' if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(lowerCamelCase__ , lowerCamelCase__ ): raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' ) # Get mean and variance on the axis to be normalized lowerCamelCase_ = tf.nn.moments(lowerCamelCase__ , axes=[axis] , keepdims=lowerCamelCase__ ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis lowerCamelCase_ = [1] * inputs.shape.rank lowerCamelCase_ = shape_list(lowerCamelCase__ )[axis] lowerCamelCase_ = tf.reshape(lowerCamelCase__ , lowerCamelCase__ ) lowerCamelCase_ = tf.reshape(lowerCamelCase__ , lowerCamelCase__ ) # Compute layer normalization using the batch_normalization # function. lowerCamelCase_ = tf.nn.batch_normalization( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , offset=lowerCamelCase__ , scale=lowerCamelCase__ , variance_epsilon=lowerCamelCase__ , ) return outputs def _SCREAMING_SNAKE_CASE ( lowercase : List[str] , lowercase : List[str]=0 , lowercase : Any=-1 ): '''simple docstring''' if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input lowerCamelCase_ = tf.shape(lowerCamelCase__ ) lowerCamelCase_ = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) lowerCamelCase_ = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(lowerCamelCase__ , lowerCamelCase__ ) def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] ): '''simple docstring''' if not isinstance(lowerCamelCase__ , tf.Tensor ): lowerCamelCase_ = tf.convert_to_tensor(lowerCamelCase__ ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: lowerCamelCase_ = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: lowerCamelCase_ = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) lowerCamelCase_ = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : int , lowercase : Optional[int] = "input_ids" ): '''simple docstring''' tf.debugging.assert_less( lowerCamelCase__ , tf.cast(lowerCamelCase__ , dtype=tensor.dtype ) , message=( f"""The maximum value of {tensor_name} ({tf.math.reduce_max(lowerCamelCase__ )}) must be smaller than the embedding """ f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time.""" ) , ) def _SCREAMING_SNAKE_CASE ( lowercase : Dict , lowercase : int , lowercase : Union[str, Any] ): '''simple docstring''' lowerCamelCase_ = 6_45_12 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. lowerCamelCase_ = [x for x in data if len(lowerCamelCase__ ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( 'The following attributes cannot be saved to HDF5 file because ' f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """ f"""bytes: {bad_attributes}""" ) lowerCamelCase_ = np.asarray(lowerCamelCase__ ) lowerCamelCase_ = 1 lowerCamelCase_ = np.array_split(lowerCamelCase__ , lowerCamelCase__ ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 lowerCamelCase_ = np.array_split(lowerCamelCase__ , lowerCamelCase__ ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(lowerCamelCase__ ): lowerCamelCase_ = chunk_data else: lowerCamelCase_ = data def _SCREAMING_SNAKE_CASE ( lowercase : Tuple , lowercase : Optional[Any] ): '''simple docstring''' if name in group.attrs: lowerCamelCase_ = [n.decode('utf8' ) if hasattr(lowerCamelCase__ , 'decode' ) else n for n in group.attrs[name]] else: lowerCamelCase_ = [] lowerCamelCase_ = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode('utf8' ) if hasattr(lowerCamelCase__ , 'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] ) chunk_id += 1 return data def _SCREAMING_SNAKE_CASE ( lowercase : Tuple ): '''simple docstring''' def _expand_single_ad_tensor(lowercase : Optional[Any] ): if isinstance(lowerCamelCase__ , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(lowerCamelCase__ , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , lowerCamelCase__ )
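Among the helpers in the cell above, `shape_list` is the workhorse of transformers' TF code: it returns static dimensions where the graph knows them and falls back to dynamic `tf.shape()` entries where it does not. A small self-contained demonstration of that behavior; the tensor shapes are arbitrary demo values.

import tensorflow as tf

def shape_list(tensor):
    # Static dims where known, tf.shape() entries where the dim is None.
    dynamic = tf.shape(tensor)
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]

@tf.function(input_signature=[tf.TensorSpec(shape=[None, None, 64], dtype=tf.float32)])
def show(t):
    dims = shape_list(t)           # [<dynamic>, <dynamic>, 64]
    tf.print(dims[0], dims[1], dims[2])

show(tf.zeros([2, 7, 64]))         # prints: 2 7 64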
710
class A: '''simple docstring''' def __init__( self : Dict ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = {} def a__ ( self : Union[str, Any] , A_ : List[Any] ) -> int: """simple docstring""" if vertex not in self.adjacency: lowerCamelCase_ = {} self.num_vertices += 1 def a__ ( self : int , A_ : int , A_ : Optional[Any] , A_ : List[str] ) -> Tuple: """simple docstring""" self.add_vertex(A_ ) self.add_vertex(A_ ) if head == tail: return lowerCamelCase_ = weight lowerCamelCase_ = weight def a__ ( self : Optional[int] ) -> List[str]: """simple docstring""" lowerCamelCase_ = self.get_edges() for edge in edges: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge edges.remove((tail, head, weight) ) for i in range(len(A_ ) ): lowerCamelCase_ = list(edges[i] ) edges.sort(key=lambda A_ : e[2] ) for i in range(len(A_ ) - 1 ): if edges[i][2] >= edges[i + 1][2]: lowerCamelCase_ = edges[i][2] + 1 for edge in edges: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge lowerCamelCase_ = weight lowerCamelCase_ = weight def __str__( self : str ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = '' for tail in self.adjacency: for head in self.adjacency[tail]: lowerCamelCase_ = self.adjacency[head][tail] string += f"""{head} -> {tail} == {weight}\n""" return string.rstrip('\n' ) def a__ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = [] for tail in self.adjacency: for head in self.adjacency[tail]: output.append((tail, head, self.adjacency[head][tail]) ) return output def a__ ( self : List[str] ) -> int: """simple docstring""" return self.adjacency.keys() @staticmethod def a__ ( A_ : Optional[Any]=None , A_ : List[str]=None ) -> List[str]: """simple docstring""" lowerCamelCase_ = Graph() if vertices is None: lowerCamelCase_ = [] if edges is None: lowerCamelCase_ = [] for vertex in vertices: g.add_vertex(A_ ) for edge in edges: g.add_edge(*A_ ) return g class A: '''simple docstring''' def __init__( self : Optional[int] ) -> int: """simple docstring""" lowerCamelCase_ = {} lowerCamelCase_ = {} def __len__( self : Any ) -> List[str]: """simple docstring""" return len(self.parent ) def a__ ( self : List[str] , A_ : Any ) -> Dict: """simple docstring""" if item in self.parent: return self.find(A_ ) lowerCamelCase_ = item lowerCamelCase_ = 0 return item def a__ ( self : List[str] , A_ : Tuple ) -> Optional[int]: """simple docstring""" if item not in self.parent: return self.make_set(A_ ) if item != self.parent[item]: lowerCamelCase_ = self.find(self.parent[item] ) return self.parent[item] def a__ ( self : Any , A_ : int , A_ : Tuple ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = self.find(A_ ) lowerCamelCase_ = self.find(A_ ) if roota == roota: return roota if self.rank[roota] > self.rank[roota]: lowerCamelCase_ = roota return roota if self.rank[roota] < self.rank[roota]: lowerCamelCase_ = roota return roota if self.rank[roota] == self.rank[roota]: self.rank[roota] += 1 lowerCamelCase_ = roota return roota return None @staticmethod def a__ ( A_ : int ) -> Tuple: """simple docstring""" lowerCamelCase_ = graph.num_vertices lowerCamelCase_ = Graph.UnionFind() lowerCamelCase_ = [] while num_components > 1: lowerCamelCase_ = {} for vertex in graph.get_vertices(): lowerCamelCase_ = -1 lowerCamelCase_ = graph.get_edges() for edge in edges: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge edges.remove((tail, head, weight) ) for edge in edges: lowerCamelCase_ , lowerCamelCase_ , 
lowerCamelCase_ = edge lowerCamelCase_ = union_find.find(A_ ) lowerCamelCase_ = union_find.find(A_ ) if seta != seta: if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: lowerCamelCase_ = [head, tail, weight] if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: lowerCamelCase_ = [head, tail, weight] for vertex in cheap_edge: if cheap_edge[vertex] != -1: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = cheap_edge[vertex] if union_find.find(A_ ) != union_find.find(A_ ): union_find.union(A_ , A_ ) mst_edges.append(cheap_edge[vertex] ) lowerCamelCase_ = num_components - 1 lowerCamelCase_ = Graph.build(edges=A_ ) return mst
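The cell above is Borůvka's minimum-spanning-tree algorithm driven by a union-find, with a preliminary pass that bumps equal edge weights apart so the "cheapest edge per component" choice is unambiguous. A compact de-mangled sketch of the core loop; it assumes a connected graph with distinct weights, which is what the cell's adjustment pass enforces.

def boruvka_mst(num_vertices, edges):
    # edges: list of (u, v, weight) tuples on vertices 0..num_vertices-1.
    parent = list(range(num_vertices))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    mst, components = [], num_vertices
    while components > 1:
        cheapest = [None] * num_vertices   # cheapest outgoing edge per component root
        for u, v, w in edges:
            ru, rv = find(u), find(v)
            if ru != rv:
                if cheapest[ru] is None or cheapest[ru][2] > w:
                    cheapest[ru] = (u, v, w)
                if cheapest[rv] is None or cheapest[rv][2] > w:
                    cheapest[rv] = (u, v, w)
        for edge in cheapest:
            if edge is not None and find(edge[0]) != find(edge[1]):
                parent[find(edge[0])] = find(edge[1])
                mst.append(edge)
                components -= 1
    return mst

print(boruvka_mst(4, [(0, 1, 1), (1, 2, 2), (2, 3, 3), (0, 3, 4)]))  # weight-6 tree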
651
0
import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging lowerCamelCase : Dict = logging.get_logger(__name__) class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = '''linear''' UpperCamelCase = '''cosine''' UpperCamelCase = '''cosine_with_restarts''' UpperCamelCase = '''polynomial''' UpperCamelCase = '''constant''' UpperCamelCase = '''constant_with_warmup''' UpperCamelCase = '''piecewise_constant''' def _SCREAMING_SNAKE_CASE ( lowercase : Optimizer , lowercase : int = -1 ): '''simple docstring''' return LambdaLR(_lowerCAmelCase , lambda lowercase : 1 , last_epoch=_lowerCAmelCase ) def _SCREAMING_SNAKE_CASE ( lowercase : Optimizer , lowercase : int , lowercase : int = -1 ): '''simple docstring''' def lr_lambda(lowercase : int ): if current_step < num_warmup_steps: return float(_lowerCAmelCase ) / float(max(1.0 , _lowerCAmelCase ) ) return 1.0 return LambdaLR(_lowerCAmelCase , _lowerCAmelCase , last_epoch=_lowerCAmelCase ) def _SCREAMING_SNAKE_CASE ( lowercase : Optimizer , lowercase : str , lowercase : int = -1 ): '''simple docstring''' lowerCamelCase_ = {} lowerCamelCase_ = step_rules.split(',' ) for rule_str in rule_list[:-1]: lowerCamelCase_ = rule_str.split(':' ) lowerCamelCase_ = int(_lowerCAmelCase ) lowerCamelCase_ = float(_lowerCAmelCase ) lowerCamelCase_ = value lowerCamelCase_ = float(rule_list[-1] ) def create_rules_function(lowercase : str , lowercase : Optional[Any] ): def rule_func(lowercase : int ) -> float: lowerCamelCase_ = sorted(rules_dict.keys() ) for i, sorted_step in enumerate(_lowerCAmelCase ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func lowerCamelCase_ = create_rules_function(_lowerCAmelCase , _lowerCAmelCase ) return LambdaLR(_lowerCAmelCase , _lowerCAmelCase , last_epoch=_lowerCAmelCase ) def _SCREAMING_SNAKE_CASE ( lowercase : List[str] , lowercase : List[Any] , lowercase : List[str] , lowercase : Dict=-1 ): '''simple docstring''' def lr_lambda(lowercase : int ): if current_step < num_warmup_steps: return float(_lowerCAmelCase ) / float(max(1 , _lowerCAmelCase ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def _SCREAMING_SNAKE_CASE ( lowercase : Optimizer , lowercase : int , lowercase : int , lowercase : float = 0.5 , lowercase : int = -1 ): '''simple docstring''' def lr_lambda(lowercase : Union[str, Any] ): if current_step < num_warmup_steps: return float(_lowerCAmelCase ) / float(max(1 , _lowerCAmelCase ) ) lowerCamelCase_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_lowerCAmelCase ) * 2.0 * progress )) ) return LambdaLR(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def _SCREAMING_SNAKE_CASE ( lowercase : Optimizer , lowercase : int , lowercase : int , lowercase : int = 1 , lowercase : int = -1 ): '''simple docstring''' def lr_lambda(lowercase : Optional[Any] ): if current_step < num_warmup_steps: return float(_lowerCAmelCase ) / float(max(1 , _lowerCAmelCase ) ) lowerCamelCase_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_lowerCAmelCase ) * progress) % 1.0) )) ) return 
LambdaLR(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def _SCREAMING_SNAKE_CASE ( lowercase : Any , lowercase : str , lowercase : Union[str, Any] , lowercase : Union[str, Any]=1e-7 , lowercase : Tuple=1.0 , lowercase : int=-1 ): '''simple docstring''' lowerCamelCase_ = optimizer.defaults["lr"] if not (lr_init > lr_end): raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" ) def lr_lambda(lowercase : int ): if current_step < num_warmup_steps: return float(_lowerCAmelCase ) / float(max(1 , _lowerCAmelCase ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: lowerCamelCase_ = lr_init - lr_end lowerCamelCase_ = num_training_steps - num_warmup_steps lowerCamelCase_ = 1 - (current_step - num_warmup_steps) / decay_steps lowerCamelCase_ = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) lowerCamelCase : Optional[int] = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, SchedulerType] , lowercase : Optimizer , lowercase : Optional[str] = None , lowercase : Optional[int] = None , lowercase : Optional[int] = None , lowercase : int = 1 , lowercase : float = 1.0 , lowercase : int = -1 , ): '''simple docstring''' lowerCamelCase_ = SchedulerType(_lowerCAmelCase ) lowerCamelCase_ = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(_lowerCAmelCase , last_epoch=_lowerCAmelCase ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(_lowerCAmelCase , step_rules=_lowerCAmelCase , last_epoch=_lowerCAmelCase ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(_lowerCAmelCase , num_warmup_steps=_lowerCAmelCase , last_epoch=_lowerCAmelCase ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( _lowerCAmelCase , num_warmup_steps=_lowerCAmelCase , num_training_steps=_lowerCAmelCase , num_cycles=_lowerCAmelCase , last_epoch=_lowerCAmelCase , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( _lowerCAmelCase , num_warmup_steps=_lowerCAmelCase , num_training_steps=_lowerCAmelCase , power=_lowerCAmelCase , last_epoch=_lowerCAmelCase , ) return schedule_func( _lowerCAmelCase , num_warmup_steps=_lowerCAmelCase , num_training_steps=_lowerCAmelCase , last_epoch=_lowerCAmelCase )
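The cell above is a scheduler factory: linear, cosine, cosine-with-restarts, polynomial, constant, and piecewise-constant schedules, all built on `LambdaLR` with a warmup ramp. As a reference for what the LINEAR entry computes, here is a minimal warmup-then-linear-decay schedule; the optimizer, learning rate, and step counts are arbitrary demo values.

import torch
from torch.optim.lr_scheduler import LambdaLR

model = torch.nn.Linear(4, 2)
opt = torch.optim.AdamW(model.parameters(), lr=1e-3)

num_warmup_steps, num_training_steps = 10, 100

def lr_lambda(step):
    # Ramp 0 -> 1 over the warmup, then decay linearly 1 -> 0.
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)
    return max(0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps))

sched = LambdaLR(opt, lr_lambda)
for _ in range(3):
    opt.step()
    sched.step()
print(opt.param_groups[0]["lr"])  # 1e-3 * 3/10 = 3e-4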
711
def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = 0 for i in range(1 , 10_01 ): total += i**i return str(lowercase )[-10:] if __name__ == "__main__": print(solution())
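De-mangled, the cell above is Project Euler 48: the last ten digits of 1^1 + 2^2 + ... + 1000^1000. The cell builds the full (roughly 3000-digit) sum before slicing; the sketch below instead keeps every intermediate value ten digits long with modular exponentiation. The `MOD` constant and `zfill` guard are additions of this sketch, not the cell's.

MOD = 10**10  # we only ever need the last ten digits

def solution(limit: int = 1000) -> str:
    total = sum(pow(i, i, MOD) for i in range(1, limit + 1)) % MOD
    return str(total).zfill(10)

print(solution())  # 9110846700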
651
0
from itertools import product from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Dict ): '''simple docstring''' lowerCamelCase_ = k_size // 2 lowerCamelCase_ = mgrid[0 - center : k_size - center, 0 - center : k_size - center] lowerCamelCase_ = 1 / (2 * pi * sigma) * exp(-(square(__lowerCAmelCase ) + square(__lowerCAmelCase )) / (2 * square(__lowerCAmelCase )) ) return g def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : str , lowercase : Any ): '''simple docstring''' lowerCamelCase_ = image.shape[0], image.shape[1] # dst image height and width lowerCamelCase_ = height - k_size + 1 lowerCamelCase_ = width - k_size + 1 # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows lowerCamelCase_ = zeros((dst_height * dst_width, k_size * k_size) ) lowerCamelCase_ = 0 for i, j in product(range(__lowerCAmelCase ) , range(__lowerCAmelCase ) ): lowerCamelCase_ = ravel(image[i : i + k_size, j : j + k_size] ) lowerCamelCase_ = window row += 1 # turn the kernel into shape(k*k, 1) lowerCamelCase_ = gen_gaussian_kernel(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase_ = ravel(__lowerCAmelCase ) # reshape and get the dst image lowerCamelCase_ = dot(__lowerCAmelCase , __lowerCAmelCase ).reshape(__lowerCAmelCase , __lowerCAmelCase ).astype(__lowerCAmelCase ) return dst if __name__ == "__main__": # read original image lowerCamelCase : Tuple = imread(r"../image_data/lena.jpg") # turn image in gray scale value lowerCamelCase : str = cvtColor(img, COLOR_BGR2GRAY) # get values with two different mask size lowerCamelCase : Tuple = gaussian_filter(gray, 3, sigma=1) lowerCamelCase : Dict = gaussian_filter(gray, 5, sigma=0.8) # show result images imshow("gaussian filter with 3x3 mask", gaussianaxa) imshow("gaussian filter with 5x5 mask", gaussianaxa) waitKey()
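The cell above blurs via im2col: every k-by-k window becomes one row of a matrix, so the whole filter reduces to a single dot product with the raveled kernel. One quirk worth flagging: the kernel's prefactor is 1/(2*pi*sigma) rather than the textbook 1/(2*pi*sigma^2), and the kernel is never renormalized, so it does not sum to 1 and the output is slightly darkened. A quick check of that claim (kernel size and sigma are arbitrary demo values):

import numpy as np

def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = np.mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    return 1 / (2 * np.pi * sigma) * np.exp(-(np.square(x) + np.square(y)) / (2 * np.square(sigma)))

k = gen_gaussian_kernel(3, sigma=1)
print(round(float(k.sum()), 2))  # ~0.78, i.e. not a unit-sum kernel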
712
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) lowerCamelCase : Dict = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Optional[int] = ["ViTFeatureExtractor"] lowerCamelCase : Dict = ["ViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : int = [ "VIT_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTForImageClassification", "ViTForMaskedImageModeling", "ViTModel", "ViTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Tuple = [ "TFViTForImageClassification", "TFViTModel", "TFViTPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Dict = [ "FlaxViTForImageClassification", "FlaxViTModel", "FlaxViTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys lowerCamelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
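The cell above is transformers' lazy `__init__` pattern: `_import_structure` maps submodules to exported names, guarded by backend-availability checks, and `_LazyModule` defers the heavy imports until an attribute is first touched. A stripped-down sketch of the mechanism follows; it is simplified (the real `_LazyModule` also handles `__dir__`, pickling, and module specs), and the `{"json": ["dumps"]}` demo mapping is hypothetical.

import importlib
import types

class LazyModule(types.ModuleType):
    """Deferred-import module: resolves names on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_module = {
            n: mod for mod, names in import_structure.items() for n in names
        }

    def __getattr__(self, name):
        if name not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        value = getattr(importlib.import_module(self._name_to_module[name]), name)
        setattr(self, name, value)  # cache so __getattr__ is not hit again
        return value

lazy = LazyModule("demo", {"json": ["dumps"]})
print(lazy.dumps({"ok": True}))   # json is only imported at this line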
651
0
import os
from typing import BinaryIO, Optional, Union

import numpy as np
import pyarrow.parquet as pq

from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


def get_writer_batch_size(features: Features) -> Optional[int]:
    """Pick a sensible Parquet row group size based on the dataset's features."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Writes the dataset's Arrow table as Parquet to a binary file handle; returns the number of bytes written."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
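# Hedged sketch of the public API that sits on top of the reader/writer
# classes above, assuming an installed `datasets`; file names are hypothetical.
from datasets import Dataset, load_dataset

ds = Dataset.from_dict({"text": ["hello", "world"], "label": [0, 1]})
written = ds.to_parquet("out.parquet")  # returns the number of bytes written
print(written)

reloaded = load_dataset("parquet", data_files="out.parquet", split="train")
print(reloaded[0])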
713
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets lowerCamelCase : int = datasets.logging.get_logger(__name__) lowerCamelCase : Optional[Any] = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n" lowerCamelCase : Tuple = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n" lowerCamelCase : Optional[Any] = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n" def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Optional[int] , lowercase : Any=False , lowercase : Any=False , lowercase : Dict=True , lowercase : List[str]=False , lowercase : int="dummy_doc" ): '''simple docstring''' lowerCamelCase_ = {doc: key_lines} lowerCamelCase_ = {doc: sys_lines} lowerCamelCase_ = {} lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowercase , key_doc_lines[doc] , lowercase ) key_singletons_num += singletons_num if NP_only or min_span: lowerCamelCase_ = reader.set_annotated_parse_trees(lowercase , key_doc_lines[doc] , lowercase , lowercase ) lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowercase , sys_doc_lines[doc] , lowercase ) sys_singletons_num += singletons_num if NP_only or min_span: lowerCamelCase_ = reader.set_annotated_parse_trees(lowercase , key_doc_lines[doc] , lowercase , lowercase ) if remove_nested: lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowercase , lowercase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowercase , lowercase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters lowerCamelCase_ = reader.get_mention_assignments(lowercase , lowercase ) lowerCamelCase_ = reader.get_mention_assignments(lowercase , lowercase ) lowerCamelCase_ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( 'Number of removed nested coreferring mentions in the key ' f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" ) logger.info( 'Number of resulting singleton clusters in the key ' f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" ) if not keep_singletons: logger.info( f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """ 'files, respectively' ) return doc_coref_infos def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Tuple , 
lowercase : List[str] , lowercase : List[Any] , lowercase : List[Any] , lowercase : Tuple , lowercase : str ): '''simple docstring''' lowerCamelCase_ = get_coref_infos(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) lowerCamelCase_ = {} lowerCamelCase_ = 0 lowerCamelCase_ = 0 for name, metric in metrics: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = evaluator.evaluate_documents(lowercase , lowercase , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": fa} ) logger.info( name.ljust(10 ) , f"""Recall: {recall * 1_00:.2f}""" , f""" Precision: {precision * 1_00:.2f}""" , f""" F1: {fa * 1_00:.2f}""" , ) if conll_subparts_num == 3: lowerCamelCase_ = (conll / 3) * 1_00 logger.info(f"""CoNLL score: {conll:.2f}""" ) output_scores.update({'conll_score': conll} ) return output_scores def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] ): '''simple docstring''' lowerCamelCase_ = False for line in key_lines: if not line.startswith('#' ): if len(line.split() ) > 6: lowerCamelCase_ = line.split()[5] if not parse_col == "-": lowerCamelCase_ = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A( datasets.Metric ): '''simple docstring''' def a__ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' ) ), 'references': datasets.Sequence(datasets.Value('string' ) ), } ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[ 'https://github.com/ns-moosavi/coval', 'https://www.aclweb.org/anthology/P16-1060', 'http://www.conll.cemantix.org/2012/data.html', ] , ) def a__ ( self : List[str] , A_ : Optional[Any] , A_ : Optional[int] , A_ : int=True , A_ : str=False , A_ : int=False , A_ : Union[str, Any]=False ) -> List[Any]: """simple docstring""" lowerCamelCase_ = [ ('mentions', evaluator.mentions), ('muc', evaluator.muc), ('bcub', evaluator.b_cubed), ('ceafe', evaluator.ceafe), ('lea', evaluator.lea), ] if min_span: lowerCamelCase_ = util.check_gold_parse_annotation(A_ ) if not has_gold_parse: raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" lowerCamelCase_ = evaluate( key_lines=A_ , sys_lines=A_ , metrics=A_ , NP_only=A_ , remove_nested=A_ , keep_singletons=A_ , min_span=A_ , ) return score
651
0
import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
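# Hedged sketch of consuming the exported decoder with onnxruntime. The output
# location mirrors the script above; the 4 latent channels and the 25x25 latent
# grid are assumptions matching the dummy input used at export time.
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("output/vae_decoder/model.onnx", providers=["CPUExecutionProvider"])
latents = np.random.randn(1, 4, 25, 25).astype(np.float32)
(sample,) = sess.run(None, {"latent_sample": latents})
print(sample.shape)  # decoded image batch, e.g. (1, 3, 200, 200) for an 8x-upsampling VAE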
714
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})

    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
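# Hedged usage sketch, assuming the class above carries its canonical
# `datasets` name (LanguageModeling, exposed via datasets.tasks).
from datasets.tasks import LanguageModeling

task = LanguageModeling(text_column="content")
print(task.column_mapping)  # {'content': 'text'}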
651
0
import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A: '''simple docstring''' def __init__( self : Dict , A_ : List[Any] , A_ : Any=13 , A_ : Union[str, Any]=[30, 30] , A_ : str=2 , A_ : Tuple=3 , A_ : int=True , A_ : Tuple=True , A_ : Union[str, Any]=32 , A_ : str=5 , A_ : List[Any]=4 , A_ : Optional[int]=37 , A_ : List[Any]="gelu" , A_ : int=0.1 , A_ : List[Any]=0.1 , A_ : Optional[int]=10 , A_ : List[Any]=0.02 , A_ : List[Any]=3 , A_ : Dict=None , A_ : Optional[Any]=8 , A_ : List[str]=10 , ) -> str: """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = image_size lowerCamelCase_ = patch_size lowerCamelCase_ = num_channels lowerCamelCase_ = is_training lowerCamelCase_ = use_labels lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = num_labels lowerCamelCase_ = scope lowerCamelCase_ = n_targets lowerCamelCase_ = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens lowerCamelCase_ = (image_size[1] // patch_size) * (image_size[0] // patch_size) lowerCamelCase_ = num_patches + 1 + self.num_detection_tokens def a__ ( self : Union[str, Any] ) -> str: """simple docstring""" lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] ) lowerCamelCase_ = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) lowerCamelCase_ = [] for i in range(self.batch_size ): lowerCamelCase_ = {} lowerCamelCase_ = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=_UpperCamelCase ) lowerCamelCase_ = torch.rand(self.n_targets , 4 , device=_UpperCamelCase ) labels.append(_UpperCamelCase ) lowerCamelCase_ = self.get_config() return config, pixel_values, labels def a__ ( self : Any ) -> str: """simple docstring""" return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , ) def a__ ( self : Dict , A_ : Optional[Any] , A_ : int , A_ : Optional[Any] ) -> 
List[str]: """simple docstring""" lowerCamelCase_ = YolosModel(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() lowerCamelCase_ = model(_UpperCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) ) def a__ ( self : Dict , A_ : Optional[int] , A_ : Optional[int] , A_ : List[Any] ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = YolosForObjectDetection(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() lowerCamelCase_ = model(pixel_values=_UpperCamelCase ) lowerCamelCase_ = model(_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) lowerCamelCase_ = model(pixel_values=_UpperCamelCase , labels=_UpperCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) def a__ ( self : Tuple ) -> List[str]: """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() lowerCamelCase_ = config_and_inputs lowerCamelCase_ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class A( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else () UpperCamelCase = ( {'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def a__ ( self : str , A_ : List[str] , A_ : Optional[Any] , A_ : Dict=False ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = super()._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase ) if return_labels: if model_class.__name__ == "YolosForObjectDetection": lowerCamelCase_ = [] for i in range(self.model_tester.batch_size ): lowerCamelCase_ = {} lowerCamelCase_ = torch.ones( size=(self.model_tester.n_targets,) , device=_UpperCamelCase , dtype=torch.long ) lowerCamelCase_ = torch.ones( self.model_tester.n_targets , 4 , device=_UpperCamelCase , dtype=torch.float ) labels.append(_UpperCamelCase ) lowerCamelCase_ = labels return inputs_dict def a__ ( self : int ) -> Dict: """simple docstring""" lowerCamelCase_ = YolosModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37 ) def a__ ( self : Optional[Any] ) -> Any: """simple docstring""" self.config_tester.run_common_tests() def a__ ( self : Optional[Any] ) -> str: """simple docstring""" pass def a__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ = model_class(_UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) ) def a__ ( self : Optional[int] ) -> Any: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ = model_class(_UpperCamelCase ) 
lowerCamelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ = [*signature.parameters.keys()] lowerCamelCase_ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _UpperCamelCase ) def a__ ( self : List[str] ) -> Any: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def a__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = True # in YOLOS, the seq_len is different lowerCamelCase_ = self.model_tester.expected_seq_len for model_class in self.all_model_classes: lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = True lowerCamelCase_ = model_class(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() with torch.no_grad(): lowerCamelCase_ = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) ) lowerCamelCase_ = outputs.attentions self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCamelCase_ = True lowerCamelCase_ = model_class(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() with torch.no_grad(): lowerCamelCase_ = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) ) lowerCamelCase_ = outputs.attentions self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) lowerCamelCase_ = len(_UpperCamelCase ) # Check attention is always last and order is fine lowerCamelCase_ = True lowerCamelCase_ = True lowerCamelCase_ = model_class(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() with torch.no_grad(): lowerCamelCase_ = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) ) lowerCamelCase_ = 1 self.assertEqual(out_len + added_hidden_states , len(_UpperCamelCase ) ) lowerCamelCase_ = outputs.attentions self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def a__ ( self : Tuple ) -> Any: """simple docstring""" def check_hidden_states_output(A_ : Union[str, Any] , A_ : Optional[int] , A_ : int ): lowerCamelCase_ = model_class(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() with torch.no_grad(): lowerCamelCase_ = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) ) lowerCamelCase_ = outputs.hidden_states lowerCamelCase_ = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase ) # YOLOS has a different seq_length lowerCamelCase_ = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ = True check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ = True check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) def a__ ( self : int 
) -> List[Any]: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*_UpperCamelCase ) @slow def a__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = YolosModel.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( ) -> Any: '''simple docstring''' lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class A( unittest.TestCase ): '''simple docstring''' @cached_property def a__ ( self : Any ) -> List[str]: """simple docstring""" return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None @slow def a__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" lowerCamelCase_ = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(_UpperCamelCase ) lowerCamelCase_ = self.default_image_processor lowerCamelCase_ = prepare_img() lowerCamelCase_ = image_processor(images=_UpperCamelCase , return_tensors='pt' ).to(_UpperCamelCase ) # forward pass with torch.no_grad(): lowerCamelCase_ = model(inputs.pixel_values ) # verify outputs lowerCamelCase_ = torch.Size((1, 100, 92) ) self.assertEqual(outputs.logits.shape , _UpperCamelCase ) lowerCamelCase_ = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=_UpperCamelCase , ) lowerCamelCase_ = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=_UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _UpperCamelCase , atol=1E-4 ) ) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _UpperCamelCase , atol=1E-4 ) ) # verify postprocessing lowerCamelCase_ = image_processor.post_process_object_detection( _UpperCamelCase , threshold=0.3 , target_sizes=[image.size[::-1]] )[0] lowerCamelCase_ = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(_UpperCamelCase ) lowerCamelCase_ = [75, 75, 17, 63, 17] lowerCamelCase_ = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(_UpperCamelCase ) self.assertEqual(len(results['scores'] ) , 5 ) self.assertTrue(torch.allclose(results['scores'] , _UpperCamelCase , atol=1E-4 ) ) self.assertSequenceEqual(results['labels'].tolist() , _UpperCamelCase ) self.assertTrue(torch.allclose(results['boxes'][0, :] , _UpperCamelCase ) )
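# Hedged minimal-inference sketch mirroring the integration test above,
# assuming network access to the 'hustvl/yolos-small' checkpoint and the COCO
# fixture image used by the test suite.
import torch
from PIL import Image
from transformers import AutoImageProcessor, YolosForObjectDetection

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")

with torch.no_grad():
    outputs = model(**processor(images=image, return_tensors="pt"))

results = processor.post_process_object_detection(
    outputs, threshold=0.9, target_sizes=[image.size[::-1]]
)[0]
for score, label in zip(results["scores"], results["labels"]):
    print(model.config.id2label[label.item()], round(score.item(), 3))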
715
from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = '''new-model''' if is_tf_available(): class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = NewModelConfig @require_tf class A( unittest.TestCase ): '''simple docstring''' @slow def a__ ( self : str ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = 'bert-base-cased' lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : List[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ = 'bert-base-cased' lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForPreTraining.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : Union[str, Any] ) -> str: """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ ) lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ , output_loading_info=A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : List[Any] ) -> Dict: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : int ) -> str: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = 
AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ ) lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ , output_loading_info=A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : Any ) -> List[Any]: """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ ) lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ , output_loading_info=A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : Tuple ) -> str: """simple docstring""" for model_name in ["bert-base-uncased"]: lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForSequenceClassification.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : List[Any] ) -> Any: """simple docstring""" for model_name in ["bert-base-uncased"]: lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForQuestionAnswering.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow @require_tensorflow_probability def a__ ( self : int ) -> Union[str, Any]: """simple docstring""" for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(A_ ) lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained( A_ , output_loading_info=A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) def a__ ( self : int ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ ) self.assertIsInstance(A_ , A_ ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 ) def a__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ ) self.assertIsInstance(A_ , A_ ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 ) def a__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = copy.deepcopy(model.config ) lowerCamelCase_ = ['FunnelBaseModel'] lowerCamelCase_ = TFAutoModel.from_config(A_ ) self.assertIsInstance(A_ , A_ ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(A_ ) lowerCamelCase_ = TFAutoModel.from_pretrained(A_ ) self.assertIsInstance(A_ , A_ ) def a__ ( self : Any ) -> Tuple: """simple docstring""" try: AutoConfig.register('new-model' , A_ ) lowerCamelCase_ = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(A_ ): auto_class.register(A_ , A_ ) 
auto_class.register(A_ , A_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(A_ ): auto_class.register(A_ , A_ ) # Now that the config is registered, it can be used as any other config with the auto-API lowerCamelCase_ = BertModelTester(self ).get_config() lowerCamelCase_ = NewModelConfig(**tiny_config.to_dict() ) lowerCamelCase_ = auto_class.from_config(A_ ) self.assertIsInstance(A_ , A_ ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(A_ ) lowerCamelCase_ = auto_class.from_pretrained(A_ ) self.assertIsInstance(A_ , A_ ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def a__ ( self : int ) -> int: """simple docstring""" with self.assertRaisesRegex( A_ , 'bert-base is not a local folder and is not a valid model identifier' ): lowerCamelCase_ = TFAutoModel.from_pretrained('bert-base' ) def a__ ( self : Any ) -> Dict: """simple docstring""" with self.assertRaisesRegex( A_ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ): lowerCamelCase_ = TFAutoModel.from_pretrained(A_ , revision='aaaaaa' ) def a__ ( self : str ) -> int: """simple docstring""" with self.assertRaisesRegex( A_ , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ): lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' ) def a__ ( self : Any ) -> List[Any]: """simple docstring""" with self.assertRaisesRegex(A_ , 'Use `from_pt=True` to load this model' ): lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' ) def a__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' ) with RequestCounter() as counter: lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' ) with RequestCounter() as counter: lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
651
0
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union

import pandas as pd
import pyarrow as pa

import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal


logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]


@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs


class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
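# Hedged sketch of the user-facing entry point for the builder above; the file
# name is hypothetical. Extra keyword arguments are routed into CsvConfig and
# from there into pandas.read_csv.
from datasets import load_dataset

ds = load_dataset("csv", data_files="data.csv", split="train", sep=";", skiprows=1)
print(ds.features)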
716
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
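# Hedged sketch: round-tripping the config above through serialization,
# assuming an installed `transformers` that ships this model; the target
# directory is hypothetical.
from transformers import GPTNeoXJapaneseConfig

config = GPTNeoXJapaneseConfig(hidden_dropout=0.1)
config.save_pretrained("./gpt-neox-japanese-config")  # writes config.json
reloaded = GPTNeoXJapaneseConfig.from_pretrained("./gpt-neox-japanese-config")
assert reloaded.hidden_dropout == 0.1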
651
0
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Return the two roots of ax^2 + bx + c = 0, real-valued when the imaginary part vanishes."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution1} and {solution2}")


if __name__ == "__main__":
    main()
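# Two worked checks for quadratic_roots above. x^2 - 3x + 2 factors as
# (x - 1)(x - 2), so the roots are real; x^2 + 1 has a negative discriminant,
# so cmath.sqrt produces the conjugate pair ±1j.
print(quadratic_roots(1, -3, 2))  # (2.0, 1.0)
print(quadratic_roots(1, 0, 1))   # (1j, -1j)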
717
import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow lowerCamelCase : List[Any] = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ "text-classification", "language-modeling", "summarization", "token-classification", "question-answering", ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) lowerCamelCase : Tuple = logging.getLogger() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument('-f' ) lowerCamelCase_ = parser.parse_args() return args.f def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Dict="eval" ): '''simple docstring''' lowerCamelCase_ = os.path.join(lowercase , f"""{split}_results.json""" ) if os.path.exists(lowercase ): with open(lowercase , 'r' ) as f: return json.load(lowercase ) raise ValueError(f"""can't find {path}""" ) lowerCamelCase : str = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class A( UpperCamelCase ): '''simple docstring''' def a__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --eval_steps=2 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(A_ , 'argv' , A_ ): run_flax_glue.main() lowerCamelCase_ = get_results(A_ ) self.assertGreaterEqual(result['eval_accuracy'] , 0.75 ) @slow def a__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_clm_flax.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --block_size 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(A_ , 'argv' , A_ ): run_clm_flax.main() lowerCamelCase_ = get_results(A_ ) self.assertLess(result['eval_perplexity'] , 100 ) @slow def a__ ( self : str ) -> Tuple: """simple docstring""" lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_summarization.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --test_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=8 --do_train --do_eval --do_predict --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --predict_with_generate """.split() with patch.object(A_ , 'argv' , A_ ): run_summarization_flax.main() lowerCamelCase_ = get_results(A_ , split='test' ) self.assertGreaterEqual(result['test_rouge1'] , 10 ) self.assertGreaterEqual(result['test_rouge2'] , 2 ) self.assertGreaterEqual(result['test_rougeL'] , 7 ) self.assertGreaterEqual(result['test_rougeLsum'] , 7 ) @slow def a__ ( 
self : Optional[int] ) -> str: """simple docstring""" lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_mlm.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --overwrite_output_dir --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --logging_steps 2 --eval_steps 2 --do_train --do_eval --num_train_epochs=1 """.split() with patch.object(A_ , 'argv' , A_ ): run_mlm_flax.main() lowerCamelCase_ = get_results(A_ ) self.assertLess(result['eval_perplexity'] , 42 ) @slow def a__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_t5_mlm_flax.py --model_name_or_path t5-small --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(A_ , 'argv' , A_ ): run_ta_mlm_flax.main() lowerCamelCase_ = get_results(A_ ) self.assertGreaterEqual(result['eval_accuracy'] , 0.42 ) @slow def a__ ( self : int ) -> Tuple: """simple docstring""" lowerCamelCase_ = 7 if get_gpu_count() > 1 else 2 lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_flax_ner.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --warmup_steps=2 --learning_rate=2e-4 --logging_steps 2 --eval_steps 2 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 """.split() with patch.object(A_ , 'argv' , A_ ): run_flax_ner.main() lowerCamelCase_ = get_results(A_ ) self.assertGreaterEqual(result['eval_accuracy'] , 0.75 ) self.assertGreaterEqual(result['eval_f1'] , 0.3 ) @slow def a__ ( self : str ) -> int: """simple docstring""" lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_qa.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=2 --do_train --do_eval --logging_steps 2 --eval_steps 2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(A_ , 'argv' , A_ ): run_qa.main() lowerCamelCase_ = get_results(A_ ) self.assertGreaterEqual(result['eval_f1'] , 30 ) self.assertGreaterEqual(result['eval_exact'] , 30 )
651
0
from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig lowerCamelCase : Dict = { "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json", "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json", } class A( _UpperCAmelCase ): '''simple docstring''' UpperCamelCase = '''ernie_m''' UpperCamelCase = {'''dropout''': '''classifier_dropout''', '''num_classes''': '''num_labels'''} def __init__( self : Dict , A_ : int = 250002 , A_ : int = 768 , A_ : int = 12 , A_ : int = 12 , A_ : int = 3072 , A_ : str = "gelu" , A_ : float = 0.1 , A_ : float = 0.1 , A_ : int = 514 , A_ : float = 0.02 , A_ : int = 1 , A_ : float = 1E-05 , A_ : Any=None , A_ : Any=False , A_ : Dict=0.0 , **A_ : List[str] , ) -> int: """simple docstring""" super().__init__(pad_token_id=lowerCamelCase_ , **lowerCamelCase_ ) lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = initializer_range lowerCamelCase_ = layer_norm_eps lowerCamelCase_ = classifier_dropout lowerCamelCase_ = is_decoder lowerCamelCase_ = act_dropout
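# The attribute map above aliases legacy config keys: checkpoints that store
# "dropout" or "num_classes" are exposed as classifier_dropout / num_labels
# when accessed through this config class.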
718
from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class A: '''simple docstring''' UpperCamelCase = 42 UpperCamelCase = None UpperCamelCase = None lowerCamelCase : str = namedtuple("CoinsDistribResult", "moves excess") def _SCREAMING_SNAKE_CASE ( lowercase : TreeNode | None ): '''simple docstring''' if root is None: return 0 # Validation def count_nodes(lowercase : TreeNode | None ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(lowercase : TreeNode | None ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(lowercase ) != count_coins(lowercase ): raise ValueError('The number of nodes should be the same as the number of coins' ) # Main calculation def get_distrib(lowercase : TreeNode | None ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) lowerCamelCase_ , lowerCamelCase_ = get_distrib(node.left ) lowerCamelCase_ , lowerCamelCase_ = get_distrib(node.right ) lowerCamelCase_ = 1 - left_distrib_excess lowerCamelCase_ = 1 - right_distrib_excess lowerCamelCase_ = ( left_distrib_moves + right_distrib_moves + abs(lowercase ) + abs(lowercase ) ) lowerCamelCase_ = node.data - coins_to_left - coins_to_right return CoinsDistribResult(lowercase , lowercase ) return get_distrib(lowercase )[0] if __name__ == "__main__": import doctest doctest.testmod()
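# Invariant behind get_distrib: a balanced subtree ends with exactly one coin per
# node, so |coins(subtree) - nodes(subtree)| coins must cross the edge to its
# parent; summing that traffic over every edge gives the minimum number of moves.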
651
0
import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() lowerCamelCase : int = logging.get_logger("transformers.models.encodec") lowerCamelCase : Any = { '''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''', '''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''', '''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''', '''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''', } lowerCamelCase : Dict = { '''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''', '''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''', '''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''', '''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''', '''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''', '''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''', '''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''', '''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''', '''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''', '''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''', '''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''', '''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''', '''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''', '''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''', '''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''', '''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''', '''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''', '''encoder.model.13.lstm''': '''encoder.layers.13.lstm''', '''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''', } lowerCamelCase : Any = { '''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''', '''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''', '''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''', '''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''', '''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''', '''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''', '''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''', '''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''', '''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''', '''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''', '''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''', '''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''', '''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''', '''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''', '''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''', '''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''', '''encoder.model.12.conv.norm''': 
'''encoder.layers.12.norm''', '''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''', } lowerCamelCase : List[Any] = { '''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''', '''decoder.model.1.lstm''': '''decoder.layers.1.lstm''', '''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''', '''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''', '''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''', '''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''', '''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''', '''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''', '''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''', '''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''', '''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''', '''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''', '''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''', '''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''', '''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''', '''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''', '''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''', '''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''', '''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''', } lowerCamelCase : List[Any] = { '''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''', '''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''', '''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''', '''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''', '''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''', '''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''', '''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''', '''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''', '''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''', '''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''', '''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''', '''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''', '''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''', '''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''', '''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''', '''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''', '''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''', '''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''', } lowerCamelCase : Dict = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } lowerCamelCase : int = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } lowerCamelCase : str = [] lowerCamelCase : List[Any] = [] def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] , lowercase : Union[str, Any] , lowercase : List[str] , lowercase : int , lowercase : Tuple ): '''simple docstring''' for attribute in key.split('.' 
): lowerCamelCase_ = getattr(_A , _A ) if weight_type is not None: lowerCamelCase_ = getattr(_A , _A ).shape else: lowerCamelCase_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": lowerCamelCase_ = value elif weight_type == "weight_g": lowerCamelCase_ = value elif weight_type == "weight_v": lowerCamelCase_ = value elif weight_type == "bias": lowerCamelCase_ = value elif weight_type == "running_mean": lowerCamelCase_ = value elif weight_type == "running_var": lowerCamelCase_ = value elif weight_type == "num_batches_tracked": lowerCamelCase_ = value elif weight_type == "weight_ih_l0": lowerCamelCase_ = value elif weight_type == "weight_hh_l0": lowerCamelCase_ = value elif weight_type == "bias_ih_l0": lowerCamelCase_ = value elif weight_type == "bias_hh_l0": lowerCamelCase_ = value elif weight_type == "weight_ih_l1": lowerCamelCase_ = value elif weight_type == "weight_hh_l1": lowerCamelCase_ = value elif weight_type == "bias_ih_l1": lowerCamelCase_ = value elif weight_type == "bias_hh_l1": lowerCamelCase_ = value else: lowerCamelCase_ = value logger.info(f"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" ) def _SCREAMING_SNAKE_CASE ( lowercase : List[Any] , lowercase : Optional[Any] ): '''simple docstring''' for key in ignore_keys: if key.endswith('.*' ): if name.startswith(key[:-1] ): return True elif ".*." in key: lowerCamelCase_ , lowerCamelCase_ = key.split('.*.' ) if prefix in name and suffix in name: return True elif key in name: return True return False def _SCREAMING_SNAKE_CASE ( lowercase : List[Any] , lowercase : Tuple , lowercase : str ): '''simple docstring''' lowerCamelCase_ = [] if model_name in ("encodec_24khz", "encodec_32khz"): lowerCamelCase_ = MAPPING_24K elif model_name == "encodec_48khz": lowerCamelCase_ = MAPPING_48K else: raise ValueError(f"""Unsupported model: {model_name}""" ) for name, value in orig_dict.items(): if should_ignore(_A , _A ): logger.info(f"""{name} was ignored""" ) continue lowerCamelCase_ = False for key, mapped_key in MAPPING.items(): if "*" in key: lowerCamelCase_ , lowerCamelCase_ = key.split('.*.' ) if prefix in name and suffix in name: lowerCamelCase_ = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith('embed' ) and name.endswith('embed_avg' ): continue lowerCamelCase_ = True if "*" in mapped_key: lowerCamelCase_ = name.split(_A )[0].split('.'
)[-2] lowerCamelCase_ = mapped_key.replace('*' , _A ) if "weight_g" in name: lowerCamelCase_ = 'weight_g' elif "weight_v" in name: lowerCamelCase_ = 'weight_v' elif "weight_ih_l0" in name: lowerCamelCase_ = 'weight_ih_l0' elif "weight_hh_l0" in name: lowerCamelCase_ = 'weight_hh_l0' elif "bias_ih_l0" in name: lowerCamelCase_ = 'bias_ih_l0' elif "bias_hh_l0" in name: lowerCamelCase_ = 'bias_hh_l0' elif "weight_ih_l1" in name: lowerCamelCase_ = 'weight_ih_l1' elif "weight_hh_l1" in name: lowerCamelCase_ = 'weight_hh_l1' elif "bias_ih_l1" in name: lowerCamelCase_ = 'bias_ih_l1' elif "bias_hh_l1" in name: lowerCamelCase_ = 'bias_hh_l1' elif "bias" in name: lowerCamelCase_ = 'bias' elif "weight" in name: lowerCamelCase_ = 'weight' elif "running_mean" in name: lowerCamelCase_ = 'running_mean' elif "running_var" in name: lowerCamelCase_ = 'running_var' elif "num_batches_tracked" in name: lowerCamelCase_ = 'num_batches_tracked' else: lowerCamelCase_ = None set_recursively(_A , _A , _A , _A , _A ) continue if not is_used: unused_weights.append(_A ) logger.warning(f"""Unused weights: {unused_weights}""" ) @torch.no_grad() def _SCREAMING_SNAKE_CASE ( lowercase : List[str] , lowercase : List[Any] , lowercase : str , lowercase : Union[str, Any]=None , lowercase : Union[str, Any]=None , ): '''simple docstring''' if config_path is not None: lowerCamelCase_ = EncodecConfig.from_pretrained(_A ) else: lowerCamelCase_ = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": lowerCamelCase_ = [8, 5, 4, 4] lowerCamelCase_ = [2.2] lowerCamelCase_ = 64 lowerCamelCase_ = 3_20_00 lowerCamelCase_ = 20_48 lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False elif model_name == "encodec_48khz": lowerCamelCase_ = [8, 5, 4, 2] lowerCamelCase_ = [3.0, 6.0, 12.0, 24.0] lowerCamelCase_ = 4_80_00 lowerCamelCase_ = 2 lowerCamelCase_ = False lowerCamelCase_ = 'time_group_norm' lowerCamelCase_ = True lowerCamelCase_ = 1.0 lowerCamelCase_ = 0.01 else: raise ValueError(f"""Unknown model name: {model_name}""" ) lowerCamelCase_ = EncodecModel(_A ) lowerCamelCase_ = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(_A ) lowerCamelCase_ = torch.load(_A ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights lowerCamelCase_ = original_checkpoint['best_state'] recursively_load_weights(_A , _A , _A ) model.save_pretrained(_A ) if repo_id: print('Pushing to the hub...' ) feature_extractor.push_to_hub(_A ) model.push_to_hub(_A ) if __name__ == "__main__": lowerCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument( "--model", default="encodec_24khz", type=str, help="The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.", ) parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." 
) lowerCamelCase : int = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
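# Illustrative invocation (the script filename and local paths are placeholders;
# only the flags come from the argparser above, and the checkpoint name from the
# download links in the header comment):
# python convert_encodec.py --model encodec_24khz \
#   --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#   --pytorch_dump_folder_path ./encodec_24khz_hf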
719
from manim import * class A( UpperCamelCase ): '''simple docstring''' def a__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" lowerCamelCase_ = Rectangle(height=0.5 , width=0.5 ) lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) lowerCamelCase_ = Rectangle(height=0.25 , width=0.25 ) lowerCamelCase_ = [mem.copy() for i in range(6 )] lowerCamelCase_ = [mem.copy() for i in range(6 )] lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = Text('CPU' , font_size=24 ) lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(A_ ) lowerCamelCase_ = [mem.copy() for i in range(4 )] lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = Text('GPU' , font_size=24 ) lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ ) gpu.move_to([-1, -1, 0] ) self.add(A_ ) lowerCamelCase_ = [mem.copy() for i in range(6 )] lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = Text('Model' , font_size=24 ) lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ ) model.move_to([3, -1.0, 0] ) self.add(A_ ) lowerCamelCase_ = [] lowerCamelCase_ = [] for i, rect in enumerate(A_ ): lowerCamelCase_ = fill.copy().set_fill(A_ , opacity=0.8 ) target.move_to(A_ ) model_arr.append(A_ ) lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(A_ ) self.add(*A_ , *A_ ) lowerCamelCase_ = [meta_mem.copy() for i in range(6 )] lowerCamelCase_ = [meta_mem.copy() for i in range(6 )] lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = Text('Disk' , font_size=24 ) lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ ) disk.move_to([-4, -1.25, 0] ) self.add(A_ , A_ ) lowerCamelCase_ = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowerCamelCase_ = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(A_ , A_ ) lowerCamelCase_ = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(A_ ) lowerCamelCase_ = MarkupText( f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(A_ ) ) lowerCamelCase_ = Square(0.3 ) input.set_fill(A_ , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , A_ , buff=0.5 ) self.play(Write(A_ ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=A_ , buff=0.02 ) self.play(MoveToTarget(A_ ) ) self.play(FadeOut(A_ ) ) lowerCamelCase_ = Arrow(start=A_ , end=A_ , color=A_ , buff=0.5 ) a.next_to(model_arr[0].get_left() , A_ , buff=0.2 ) model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) lowerCamelCase_ = MarkupText( f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(A_ , run_time=3 ) ) lowerCamelCase_ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02} self.play( Write(A_ 
) , Circumscribe(model_arr[0] , color=A_ , **A_ ) , Circumscribe(model_cpu_arr[0] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) lowerCamelCase_ = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , A_ , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) lowerCamelCase_ = AnimationGroup( FadeOut(A_ , run_time=0.5 ) , MoveToTarget(A_ , run_time=0.5 ) , FadeIn(A_ , run_time=0.5 ) , lag_ratio=0.2 ) self.play(A_ ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: lowerCamelCase_ = 0.7 self.play( Circumscribe(model_arr[i] , **A_ ) , Circumscribe(cpu_left_col_base[i] , **A_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , Circumscribe(model_arr[i + 1] , color=A_ , **A_ ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=A_ , **A_ ) , Circumscribe(cpu_left_col_base[-1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) lowerCamelCase_ = a_c lowerCamelCase_ = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(A_ ) , FadeOut(A_ , run_time=0.5 ) , ) lowerCamelCase_ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(A_ , run_time=3 ) , MoveToTarget(A_ ) ) self.wait()
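# The scene above animates layer-wise offloading: as the input square reaches
# model layer i, that layer's weights are moved from CPU to GPU, executed, and
# returned to CPU, so a model too large for GPU memory still completes inference.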
651
0
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class A: '''simple docstring''' def __init__( self : List[Any] , A_ : Tuple , A_ : Union[str, Any]=14 , A_ : List[str]=7 , A_ : List[str]=True , A_ : Tuple=True , A_ : Optional[Any]=False , A_ : List[Any]=True , A_ : Union[str, Any]=99 , A_ : int=32 , A_ : Optional[Any]=4 , A_ : Any=4 , A_ : str=4 , A_ : Union[str, Any]=37 , A_ : str="gelu" , A_ : Optional[Any]=0.1 , A_ : Tuple=0.1 , A_ : Any=512 , A_ : List[str]=0.02 , ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = seq_length lowerCamelCase_ = is_training lowerCamelCase_ = use_input_mask lowerCamelCase_ = use_token_type_ids lowerCamelCase_ = use_labels lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = rotary_dim lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = initializer_range lowerCamelCase_ = None lowerCamelCase_ = vocab_size - 1 lowerCamelCase_ = vocab_size - 1 lowerCamelCase_ = vocab_size - 1 def a__ ( self : str ) -> Any: """simple docstring""" lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase_ = None if self.use_input_mask: lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=snake_case_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def a__ ( self : Dict ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs lowerCamelCase_ = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict def a__ ( self : Optional[Any] , A_ : Tuple , A_ : Tuple , A_ : Dict , A_ : Optional[int] ) -> List[str]: """simple docstring""" lowerCamelCase_ = 20 lowerCamelCase_ = model_class_name(snake_case_ ) lowerCamelCase_ = model.init_cache(input_ids.shape[0] , snake_case_ ) lowerCamelCase_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' ) lowerCamelCase_ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) lowerCamelCase_ = model( input_ids[:, :-1] , attention_mask=snake_case_ , past_key_values=snake_case_ , position_ids=snake_case_ , ) lowerCamelCase_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] 
, dtype='i4' ) lowerCamelCase_ = model( input_ids[:, -1:] , attention_mask=snake_case_ , past_key_values=outputs_cache.past_key_values , position_ids=snake_case_ , ) lowerCamelCase_ = model(snake_case_ ) lowerCamelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" ) def a__ ( self : Optional[int] , A_ : List[str] , A_ : Any , A_ : Optional[int] , A_ : int ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = 20 lowerCamelCase_ = model_class_name(snake_case_ ) lowerCamelCase_ = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) lowerCamelCase_ = model.init_cache(input_ids.shape[0] , snake_case_ ) lowerCamelCase_ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) lowerCamelCase_ = model( input_ids[:, :-1] , attention_mask=snake_case_ , past_key_values=snake_case_ , position_ids=snake_case_ , ) lowerCamelCase_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' ) lowerCamelCase_ = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=snake_case_ , position_ids=snake_case_ , ) lowerCamelCase_ = model(snake_case_ , attention_mask=snake_case_ ) lowerCamelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" ) @require_flax class A( _a , _a , unittest.TestCase ): '''simple docstring''' UpperCamelCase = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () UpperCamelCase = (FlaxGPTJForCausalLM,) if is_flax_available() else () def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = FlaxGPTJModelTester(self ) def a__ ( self : List[str] ) -> Tuple: """simple docstring""" for model_class_name in self.all_model_classes: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) def a__ ( self : Tuple ) -> Dict: """simple docstring""" for model_class_name in self.all_model_classes: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( snake_case_ , snake_case_ , snake_case_ , snake_case_ ) @tooslow def a__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" lowerCamelCase_ = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' ) lowerCamelCase_ = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=snake_case_ , truncation=snake_case_ ) lowerCamelCase_ = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' ) lowerCamelCase_ = False lowerCamelCase_ = model.config.eos_token_id lowerCamelCase_ = jax.jit(model.generate ) lowerCamelCase_ = jit_generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences lowerCamelCase_ = tokenizer.batch_decode(snake_case_ , skip_special_tokens=snake_case_ ) lowerCamelCase_ = [ 'Hello this is a long string of text.\n\nI\'m trying to get the text of the', 'Hey, I\'m a little late to the party. 
I\'m going to', ] self.assertListEqual(snake_case_ , snake_case_ ) @is_pt_flax_cross_test def a__ ( self : Optional[Any] ) -> str: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs lowerCamelCase_ = self._prepare_for_class(snake_case_ , snake_case_ ) lowerCamelCase_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowerCamelCase_ = model_class.__name__[4:] # Skip the "Flax" at the beginning lowerCamelCase_ = getattr(snake_case_ , snake_case_ ) lowerCamelCase_ , lowerCamelCase_ = pt_inputs['input_ids'].shape lowerCamelCase_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(snake_case_ ): lowerCamelCase_ = 0 lowerCamelCase_ = 1 lowerCamelCase_ = 0 lowerCamelCase_ = 1 lowerCamelCase_ = pt_model_class(snake_case_ ).eval() lowerCamelCase_ = model_class(snake_case_ , dtype=jnp.floataa ) lowerCamelCase_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , snake_case_ ) lowerCamelCase_ = fx_state with torch.no_grad(): lowerCamelCase_ = pt_model(**snake_case_ ).to_tuple() lowerCamelCase_ = fx_model(**snake_case_ ).to_tuple() self.assertEqual(len(snake_case_ ) , len(snake_case_ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(snake_case_ , snake_case_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(snake_case_ ) lowerCamelCase_ = model_class.from_pretrained(snake_case_ , from_pt=snake_case_ ) lowerCamelCase_ = fx_model_loaded(**snake_case_ ).to_tuple() self.assertEqual( len(snake_case_ ) , len(snake_case_ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output_loaded, pt_output in zip(snake_case_ , snake_case_ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def a__ ( self : List[str] ) -> Dict: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs lowerCamelCase_ = self._prepare_for_class(snake_case_ , snake_case_ ) lowerCamelCase_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowerCamelCase_ = model_class.__name__[4:] # Skip the "Flax" at the beginning lowerCamelCase_ = getattr(snake_case_ , snake_case_ ) lowerCamelCase_ = pt_model_class(snake_case_ ).eval() lowerCamelCase_ = model_class(snake_case_ , dtype=jnp.floataa ) lowerCamelCase_ = load_flax_weights_in_pytorch_model(snake_case_ , fx_model.params ) lowerCamelCase_ , lowerCamelCase_ = pt_inputs['input_ids'].shape lowerCamelCase_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(snake_case_ ): lowerCamelCase_ = 0 lowerCamelCase_ = 1 lowerCamelCase_ = 0 lowerCamelCase_ = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): lowerCamelCase_ = pt_model(**snake_case_ ).to_tuple() lowerCamelCase_ = fx_model(**snake_case_ ).to_tuple() self.assertEqual(len(snake_case_ ) , len(snake_case_ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(snake_case_ , snake_case_ ): self.assert_almost_equals(fx_output[:, -1] 
, pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(snake_case_ ) lowerCamelCase_ = pt_model_class.from_pretrained(snake_case_ , from_flax=snake_case_ ) with torch.no_grad(): lowerCamelCase_ = pt_model_loaded(**snake_case_ ).to_tuple() self.assertEqual( len(snake_case_ ) , len(snake_case_ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(snake_case_ , snake_case_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" for model_class_name in self.all_model_classes: lowerCamelCase_ = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' ) lowerCamelCase_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(snake_case_ )
720
import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available from . import BaseDiffusersCLICommand def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] ): '''simple docstring''' return EnvironmentCommand() class A( UpperCamelCase ): '''simple docstring''' @staticmethod def a__ ( A_ : ArgumentParser ) -> str: """simple docstring""" lowerCamelCase_ = parser.add_parser('env' ) download_parser.set_defaults(func=A_ ) def a__ ( self : Optional[Any] ) -> Any: """simple docstring""" lowerCamelCase_ = huggingface_hub.__version__ lowerCamelCase_ = 'not installed' lowerCamelCase_ = 'NA' if is_torch_available(): import torch lowerCamelCase_ = torch.__version__ lowerCamelCase_ = torch.cuda.is_available() lowerCamelCase_ = 'not installed' if is_transformers_available(): import transformers lowerCamelCase_ = transformers.__version__ lowerCamelCase_ = 'not installed' if is_accelerate_available(): import accelerate lowerCamelCase_ = accelerate.__version__ lowerCamelCase_ = 'not installed' if is_xformers_available(): import xformers lowerCamelCase_ = xformers.__version__ lowerCamelCase_ = { '`diffusers` version': version, 'Platform': platform.platform(), 'Python version': platform.python_version(), 'PyTorch version (GPU?)': f"""{pt_version} ({pt_cuda_available})""", 'Huggingface_hub version': hub_version, 'Transformers version': transformers_version, 'Accelerate version': accelerate_version, 'xFormers version': xformers_version, 'Using GPU in script?': '<fill in>', 'Using distributed or parallel set-up in script?': '<fill in>', } print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' ) print(self.format_dict(A_ ) ) return info @staticmethod def a__ ( A_ : Dict ) -> Any: """simple docstring""" return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
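# This registers the `env` subcommand of the diffusers CLI and prints the version
# table above for pasting into bug reports. (Invocation is typically
# `diffusers-cli env`, assuming the standard console entry point.)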
651
0
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCamelCase : str = logging.get_logger(__name__) class A( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' UpperCamelCase = ['''pixel_values'''] def __init__( self : str , A_ : Union[str, Any] = True , A_ : Dict = None , A_ : List[str] = None , A_ : int = PILImageResampling.BILINEAR , A_ : List[str] = True , A_ : List[Any] = 1 / 255 , A_ : Any = True , A_ : Dict = None , A_ : Optional[Any] = None , **A_ : str , ) -> None: """simple docstring""" super().__init__(**_lowercase ) lowerCamelCase_ = size if size is not None else {"""shortest_edge""": 384} lowerCamelCase_ = get_size_dict(_lowercase , default_to_square=_lowercase ) lowerCamelCase_ = do_resize lowerCamelCase_ = size # Default value set here for backwards compatibility where the value in config is None lowerCamelCase_ = crop_pct if crop_pct is not None else 224 / 256 lowerCamelCase_ = resample lowerCamelCase_ = do_rescale lowerCamelCase_ = rescale_factor lowerCamelCase_ = do_normalize lowerCamelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCamelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD def a__ ( self : str , A_ : Tuple , A_ : List[Any] , A_ : str , A_ : Optional[Any] = PILImageResampling.BICUBIC , A_ : Tuple = None , **A_ : int , ) -> np.ndarray: """simple docstring""" lowerCamelCase_ = get_size_dict(_lowercase , default_to_square=_lowercase ) if "shortest_edge" not in size: raise ValueError(f"""Size dictionary must contain \'shortest_edge\' key. 
Got {size.keys()}""" ) lowerCamelCase_ = size["""shortest_edge"""] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct lowerCamelCase_ = int(shortest_edge / crop_pct ) lowerCamelCase_ = get_resize_output_image_size(_lowercase , size=_lowercase , default_to_square=_lowercase ) lowerCamelCase_ = resize(image=_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=_lowercase , size=(shortest_edge, shortest_edge) , data_format=_lowercase , **_lowercase ) else: # warping (no cropping) when evaluated at 384 or larger return resize( _lowercase , size=(shortest_edge, shortest_edge) , resample=_lowercase , data_format=_lowercase , **_lowercase ) def a__ ( self : Union[str, Any] , A_ : str , A_ : str , A_ : Dict = None , **A_ : Tuple , ) -> Union[str, Any]: """simple docstring""" return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase ) def a__ ( self : Dict , A_ : Any , A_ : Dict , A_ : Optional[Any] , A_ : List[str] = None , **A_ : str , ) -> np.ndarray: """simple docstring""" return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase ) def a__ ( self : List[Any] , A_ : List[str] , A_ : Optional[Any] = None , A_ : Dict = None , A_ : Dict = None , A_ : Optional[Any] = None , A_ : int = None , A_ : List[str] = None , A_ : Optional[int] = None , A_ : Optional[Any] = None , A_ : str = None , A_ : List[Any] = None , A_ : int = ChannelDimension.FIRST , **A_ : Tuple , ) -> PIL.Image.Image: """simple docstring""" lowerCamelCase_ = do_resize if do_resize is not None else self.do_resize lowerCamelCase_ = crop_pct if crop_pct is not None else self.crop_pct lowerCamelCase_ = resample if resample is not None else self.resample lowerCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase_ = image_mean if image_mean is not None else self.image_mean lowerCamelCase_ = image_std if image_std is not None else self.image_std lowerCamelCase_ = size if size is not None else self.size lowerCamelCase_ = get_size_dict(_lowercase , default_to_square=_lowercase ) lowerCamelCase_ = make_list_of_images(_lowercase ) if not valid_images(_lowercase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and (size is None or resample is None): raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError('crop_pct must be specified if size < 384.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays.
lowerCamelCase_ = [to_numpy_array(_lowercase ) for image in images] if do_resize: lowerCamelCase_ = [self.resize(image=_lowercase , size=_lowercase , crop_pct=_lowercase , resample=_lowercase ) for image in images] if do_rescale: lowerCamelCase_ = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images] if do_normalize: lowerCamelCase_ = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images] lowerCamelCase_ = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images] lowerCamelCase_ = {"""pixel_values""": images} return BatchFeature(data=_lowercase , tensor_type=_lowercase )
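# Resize strategy in this processor: below 384px the shortest edge is first
# scaled to shortest_edge / crop_pct and the result is center-cropped back to
# (shortest_edge, shortest_edge); at 384px and above the image is warped directly
# to the target square with no crop, matching ConvNeXt-style evaluation.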
721
from __future__ import annotations from fractions import Fraction def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : int ): '''simple docstring''' return ( num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den ) def _SCREAMING_SNAKE_CASE ( lowercase : int ): '''simple docstring''' lowerCamelCase_ = [] lowerCamelCase_ = 11 lowerCamelCase_ = int('1' + '0' * digit_len ) for num in range(lowercase , lowercase ): while den <= 99: if (num != den) and (num % 10 == den // 10) and (den % 10 != 0): if is_digit_cancelling(lowercase , lowercase ): solutions.append(f"""{num}/{den}""" ) den += 1 num += 1 lowerCamelCase_ = 10 return solutions def _SCREAMING_SNAKE_CASE ( lowercase : int = 2 ): '''simple docstring''' lowerCamelCase_ = 1.0 for fraction in fraction_list(lowercase ): lowerCamelCase_ = Fraction(lowercase ) result *= frac.denominator / frac.numerator return int(lowercase ) if __name__ == "__main__": print(solution())
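# For the classic two-digit case the four non-trivial digit-cancelling fractions
# are 16/64, 19/95, 26/65 and 49/98; their product reduces to 1/100, so
# solution() returns 100.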
651
0
from __future__ import annotations def _SCREAMING_SNAKE_CASE ( lowercase : list[int] ): '''simple docstring''' if len(snake_case__ ) == 0: return array lowerCamelCase_ , lowerCamelCase_ = min(snake_case__ ), max(snake_case__ ) # Compute the variables lowerCamelCase_ = _max - _min + 1 lowerCamelCase_ , lowerCamelCase_ = [0] * holes_range, [0] * holes_range # Make the sorting. for i in array: lowerCamelCase_ = i - _min lowerCamelCase_ = i holes_repeat[index] += 1 # Makes the array back by replacing the numbers. lowerCamelCase_ = 0 for i in range(snake_case__ ): while holes_repeat[i] > 0: lowerCamelCase_ = holes[i] index += 1 holes_repeat[i] -= 1 # Returns the sorted array. return array if __name__ == "__main__": import doctest doctest.testmod() lowerCamelCase : Optional[Any] = input("Enter numbers separated by comma:\n") lowerCamelCase : int = [int(x) for x in user_input.split(",")] print(pigeon_sort(unsorted))
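# Pigeonhole sort runs in O(n + range) time and O(range) extra space, where
# range = max(arr) - min(arr) + 1, so it only pays off when the value range is
# comparable to the input length.
# e.g. pigeon_sort([8, 3, 2, 7, 4, 6, 8]) -> [2, 3, 4, 6, 7, 8, 8]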
700
from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging lowerCamelCase : List[Any] = logging.get_logger(__name__) class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = ['''pixel_values'''] def __init__( self : List[Any] , A_ : bool = True , A_ : Dict[str, int] = None , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : bool = True , A_ : Dict[str, int] = None , A_ : bool = True , A_ : Union[int, float] = 1 / 255 , A_ : bool = True , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **A_ : Tuple , ) -> None: """simple docstring""" super().__init__(**A_ ) lowerCamelCase_ = size if size is not None else {'shortest_edge': 224} lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ ) lowerCamelCase_ = crop_size if crop_size is not None else {'height': 224, 'width': 224} lowerCamelCase_ = get_size_dict(A_ , param_name='crop_size' ) lowerCamelCase_ = do_resize lowerCamelCase_ = size lowerCamelCase_ = resample lowerCamelCase_ = do_center_crop lowerCamelCase_ = crop_size lowerCamelCase_ = do_rescale lowerCamelCase_ = rescale_factor lowerCamelCase_ = do_normalize lowerCamelCase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN lowerCamelCase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Dict[str, int] , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Tuple , ) -> np.ndarray: """simple docstring""" lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: lowerCamelCase_ = int((256 / 224) * size['shortest_edge'] ) lowerCamelCase_ = get_resize_output_image_size(A_ , size=A_ , default_to_square=A_ ) lowerCamelCase_ = {'height': output_size[0], 'width': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" ) return resize( A_ , size=(size_dict['height'], size_dict['width']) , resample=A_ , data_format=A_ , **A_ ) def a__ ( self : Any , A_ : np.ndarray , A_ : Dict[str, int] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Any , ) -> np.ndarray: """simple docstring""" lowerCamelCase_ = get_size_dict(A_ ) if "height" not in size or "width" not in size: raise ValueError(f"""Size dict must have keys 'height' and 'width'. 
Got {size.keys()}""" ) return center_crop(A_ , size=(size['height'], size['width']) , data_format=A_ , **A_ ) def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Union[int, float] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Optional[int] , ) -> np.ndarray: """simple docstring""" return rescale(A_ , scale=A_ , data_format=A_ , **A_ ) def a__ ( self : List[str] , A_ : np.ndarray , A_ : Union[float, List[float]] , A_ : Union[float, List[float]] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : str , ) -> np.ndarray: """simple docstring""" return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ ) def a__ ( self : Optional[int] , A_ : ImageInput , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : PILImageResampling = None , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : Optional[bool] = None , A_ : Optional[float] = None , A_ : Optional[bool] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[TensorType] = None , A_ : ChannelDimension = ChannelDimension.FIRST , **A_ : List[Any] , ) -> BatchFeature: """simple docstring""" lowerCamelCase_ = do_resize if do_resize is not None else self.do_resize lowerCamelCase_ = resample if resample is not None else self.resample lowerCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase_ = image_mean if image_mean is not None else self.image_mean lowerCamelCase_ = image_std if image_std is not None else self.image_std lowerCamelCase_ = size if size is not None else self.size lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ ) lowerCamelCase_ = crop_size if crop_size is not None else self.crop_size lowerCamelCase_ = get_size_dict(A_ , param_name='crop_size' ) lowerCamelCase_ = make_list_of_images(A_ ) if not valid_images(A_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. lowerCamelCase_ = [to_numpy_array(A_ ) for image in images] if do_resize: lowerCamelCase_ = [self.resize(A_ , A_ , A_ ) for image in images] if do_center_crop: lowerCamelCase_ = [self.center_crop(A_ , A_ ) for image in images] if do_rescale: lowerCamelCase_ = [self.rescale(A_ , A_ ) for image in images] if do_normalize: lowerCamelCase_ = [self.normalize(A_ , A_ , A_ ) for image in images] lowerCamelCase_ = [to_channel_dimension_format(A_ , A_ ) for image in images] lowerCamelCase_ = {'pixel_values': images} return BatchFeature(data=A_ , tensor_type=A_ )
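# The (256 / 224) factor above reproduces the standard ImageNet evaluation
# recipe: resize so the shortest edge lands at 256, then center-crop to 224x224.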
651
0
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class A: '''simple docstring''' def __init__( self : Any , A_ : Union[str, Any] , A_ : Optional[int]=14 , A_ : List[str]=7 , A_ : Any=True , A_ : Dict=True , A_ : Union[str, Any]=False , A_ : Any=True , A_ : Optional[Any]=99 , A_ : str=32 , A_ : List[str]=4 , A_ : Union[str, Any]=4 , A_ : Union[str, Any]=4 , A_ : int=37 , A_ : List[str]="gelu" , A_ : str=0.1 , A_ : List[str]=0.1 , A_ : List[str]=512 , A_ : Union[str, Any]=0.02 , ) -> List[str]: """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = seq_length lowerCamelCase_ = is_training lowerCamelCase_ = use_input_mask lowerCamelCase_ = use_token_type_ids lowerCamelCase_ = use_labels lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = rotary_dim lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = initializer_range lowerCamelCase_ = None lowerCamelCase_ = vocab_size - 1 lowerCamelCase_ = vocab_size - 1 lowerCamelCase_ = vocab_size - 1 def a__ ( self : Union[str, Any] ) -> str: """simple docstring""" lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase_ = None if self.use_input_mask: lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=A_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def a__ ( self : int ) -> Tuple: """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs lowerCamelCase_ = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict def a__ ( self : Any , A_ : List[Any] , A_ : List[str] , A_ : Dict , A_ : Tuple ) -> Any: """simple docstring""" lowerCamelCase_ = 20 lowerCamelCase_ = model_class_name(A_ ) lowerCamelCase_ = model.init_cache(input_ids.shape[0] , A_ ) lowerCamelCase_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' ) lowerCamelCase_ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) lowerCamelCase_ = model( input_ids[:, :-1] , attention_mask=A_ , past_key_values=A_ , position_ids=A_ , ) lowerCamelCase_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' ) lowerCamelCase_ = model( input_ids[:, -1:] , 
attention_mask=A_ , past_key_values=outputs_cache.past_key_values , position_ids=A_ , ) lowerCamelCase_ = model(A_ ) lowerCamelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" ) def a__ ( self : int , A_ : str , A_ : str , A_ : int , A_ : Union[str, Any] ) -> int: """simple docstring""" lowerCamelCase_ = 20 lowerCamelCase_ = model_class_name(A_ ) lowerCamelCase_ = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) lowerCamelCase_ = model.init_cache(input_ids.shape[0] , A_ ) lowerCamelCase_ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) lowerCamelCase_ = model( input_ids[:, :-1] , attention_mask=A_ , past_key_values=A_ , position_ids=A_ , ) lowerCamelCase_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' ) lowerCamelCase_ = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=A_ , position_ids=A_ , ) lowerCamelCase_ = model(A_ , attention_mask=A_ ) lowerCamelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" ) @require_flax class A( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' UpperCamelCase = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () UpperCamelCase = (FlaxGPTJForCausalLM,) if is_flax_available() else () def a__ ( self : List[Any] ) -> Any: """simple docstring""" lowerCamelCase_ = FlaxGPTJModelTester(self ) def a__ ( self : List[str] ) -> Tuple: """simple docstring""" for model_class_name in self.all_model_classes: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(A_ , A_ , A_ , A_ ) def a__ ( self : Optional[int] ) -> Dict: """simple docstring""" for model_class_name in self.all_model_classes: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( A_ , A_ , A_ , A_ ) @tooslow def a__ ( self : Dict ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' ) lowerCamelCase_ = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=A_ , truncation=A_ ) lowerCamelCase_ = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' ) lowerCamelCase_ = False lowerCamelCase_ = model.config.eos_token_id lowerCamelCase_ = jax.jit(model.generate ) lowerCamelCase_ = jit_generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences lowerCamelCase_ = tokenizer.batch_decode(A_ , skip_special_tokens=A_ ) lowerCamelCase_ = [ 'Hello this is a long string of text.\n\nI\'m trying to get the text of the', 'Hey, I\'m a little late to the party. 
I\'m going to', ] self.assertListEqual(A_ , A_ ) @is_pt_flax_cross_test def a__ ( self : List[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs lowerCamelCase_ = self._prepare_for_class(A_ , A_ ) lowerCamelCase_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowerCamelCase_ = model_class.__name__[4:] # Skip the "Flax" at the beginning lowerCamelCase_ = getattr(A_ , A_ ) lowerCamelCase_ , lowerCamelCase_ = pt_inputs['input_ids'].shape lowerCamelCase_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(A_ ): lowerCamelCase_ = 0 lowerCamelCase_ = 1 lowerCamelCase_ = 0 lowerCamelCase_ = 1 lowerCamelCase_ = pt_model_class(A_ ).eval() lowerCamelCase_ = model_class(A_ , dtype=jnp.floataa ) lowerCamelCase_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , A_ ) lowerCamelCase_ = fx_state with torch.no_grad(): lowerCamelCase_ = pt_model(**A_ ).to_tuple() lowerCamelCase_ = fx_model(**A_ ).to_tuple() self.assertEqual(len(A_ ) , len(A_ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(A_ , A_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(A_ ) lowerCamelCase_ = model_class.from_pretrained(A_ , from_pt=A_ ) lowerCamelCase_ = fx_model_loaded(**A_ ).to_tuple() self.assertEqual( len(A_ ) , len(A_ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output_loaded, pt_output in zip(A_ , A_ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def a__ ( self : Any ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs lowerCamelCase_ = self._prepare_for_class(A_ , A_ ) lowerCamelCase_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowerCamelCase_ = model_class.__name__[4:] # Skip the "Flax" at the beginning lowerCamelCase_ = getattr(A_ , A_ ) lowerCamelCase_ = pt_model_class(A_ ).eval() lowerCamelCase_ = model_class(A_ , dtype=jnp.floataa ) lowerCamelCase_ = load_flax_weights_in_pytorch_model(A_ , fx_model.params ) lowerCamelCase_ , lowerCamelCase_ = pt_inputs['input_ids'].shape lowerCamelCase_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(A_ ): lowerCamelCase_ = 0 lowerCamelCase_ = 1 lowerCamelCase_ = 0 lowerCamelCase_ = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): lowerCamelCase_ = pt_model(**A_ ).to_tuple() lowerCamelCase_ = fx_model(**A_ ).to_tuple() self.assertEqual(len(A_ ) , len(A_ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(A_ , A_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(A_ ) lowerCamelCase_ = pt_model_class.from_pretrained(A_ , from_flax=A_ ) with torch.no_grad(): lowerCamelCase_ = pt_model_loaded(**A_ ).to_tuple() self.assertEqual( len(A_ ) , len(A_ ) , 'Output lengths differ between Flax and 
PyTorch' ) for fx_output, pt_output in zip(A_ , A_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def a__ ( self : Tuple ) -> Any: """simple docstring""" for model_class_name in self.all_model_classes: lowerCamelCase_ = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' ) lowerCamelCase_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(A_ )
701
import cva import numpy as np class A: '''simple docstring''' def __init__( self : int , A_ : float , A_ : int ) -> List[Any]: """simple docstring""" if k in (0.04, 0.06): lowerCamelCase_ = k lowerCamelCase_ = window_size else: raise ValueError('invalid k value' ) def __str__( self : str ) -> str: """simple docstring""" return str(self.k ) def a__ ( self : Any , A_ : str ) -> tuple[cva.Mat, list[list[int]]]: """simple docstring""" lowerCamelCase_ = cva.imread(A_ , 0 ) lowerCamelCase_ , lowerCamelCase_ = img.shape lowerCamelCase_ = [] lowerCamelCase_ = img.copy() lowerCamelCase_ = cva.cvtColor(A_ , cva.COLOR_GRAY2RGB ) lowerCamelCase_ , lowerCamelCase_ = np.gradient(A_ ) lowerCamelCase_ = dx**2 lowerCamelCase_ = dy**2 lowerCamelCase_ = dx * dy lowerCamelCase_ = self.k # use the k validated in the constructor instead of a hardcoded 0.04 lowerCamelCase_ = self.window_size // 2 for y in range(A_ , h - offset ): for x in range(A_ , w - offset ): lowerCamelCase_ = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowerCamelCase_ = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowerCamelCase_ = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowerCamelCase_ = (wxx * wyy) - (wxy**2) lowerCamelCase_ = wxx + wyy lowerCamelCase_ = det - k * (trace**2) if r > 0.5: # 0.5 is the corner-response threshold; tune it per image corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 255 ) return color_img, corner_list if __name__ == "__main__": lowerCamelCase : Optional[int] = HarrisCorner(0.04, 3) lowerCamelCase , lowerCamelCase : Optional[int] = edge_detect.detect("path_to_image") cva.imwrite("detect.png", color_img)
651
0
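The Harris detector above scores each window of the structure tensor M with the response R = det(M) - k * trace(M)^2. A minimal NumPy sketch of that response map, assuming a float grayscale image and using scipy's uniform_filter for the window sum (averaging instead of summing only rescales R and leaves the corner ranking unchanged); the names here are illustrative, not from the snippet:

import numpy as np
from scipy.ndimage import uniform_filter

def harris_response(img: np.ndarray, k: float = 0.04, window: int = 3) -> np.ndarray:
    """Per-pixel Harris corner response R = det(M) - k * trace(M)**2."""
    dy, dx = np.gradient(img.astype(float))  # gradients along rows and columns
    # Windowed averages of the three unique structure-tensor entries.
    wxx = uniform_filter(dx * dx, size=window)
    wyy = uniform_filter(dy * dy, size=window)
    wxy = uniform_filter(dx * dy, size=window)
    det = wxx * wyy - wxy**2
    trace = wxx + wyy
    return det - k * trace**2

# Pixels with a large positive response are corner candidates, e.g.:
# corners = np.argwhere(harris_response(img) > threshold)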
import math def _SCREAMING_SNAKE_CASE ( lowercase : list , lowercase : int ): '''simple docstring''' lowerCamelCase_ = len(lowercase ) lowerCamelCase_ = int(math.floor(math.sqrt(lowercase ) ) ) lowerCamelCase_ = 0 while arr[min(lowercase , lowercase ) - 1] < x: lowerCamelCase_ = step step += int(math.floor(math.sqrt(lowercase ) ) ) if prev >= n: return -1 while arr[prev] < x: lowerCamelCase_ = prev + 1 if prev == min(lowercase , lowercase ): return -1 if arr[prev] == x: return prev return -1 if __name__ == "__main__": lowerCamelCase : Dict = input("Enter numbers separated by a comma:\n").strip() lowerCamelCase : List[str] = [int(item) for item in user_input.split(",")] lowerCamelCase : List[str] = int(input("Enter the number to be searched:\n")) lowerCamelCase : Optional[int] = jump_search(arr, x) if res == -1: print("Number not found!") else: print(F"""Number {x} is at index {res}""")
702
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCamelCase : str = logging.get_logger(__name__) lowerCamelCase : Optional[Any] = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } lowerCamelCase : int = { "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } lowerCamelCase : Tuple = {"facebook/blenderbot-3B": 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = ( list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) lowerCamelCase_ = bs[:] lowerCamelCase_ = 0 for b in range(2**8 ): if b not in bs: bs.append(lowercase ) cs.append(2**8 + n ) n += 1 lowerCamelCase_ = [chr(lowercase ) for n in cs] return dict(zip(lowercase , lowercase ) ) def _SCREAMING_SNAKE_CASE ( lowercase : int ): '''simple docstring''' lowerCamelCase_ = set() lowerCamelCase_ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase_ = char return pairs class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = VOCAB_FILES_NAMES UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : Optional[Any] , A_ : List[Any] , A_ : List[Any] , A_ : Union[str, Any]="replace" , A_ : Dict="<s>" , A_ : Optional[int]="</s>" , A_ : Optional[Any]="</s>" , A_ : Dict="<s>" , A_ : Dict="<unk>" , A_ : Any="<pad>" , A_ : Dict="<mask>" , A_ : Union[str, Any]=False , **A_ : List[str] , ) -> Tuple: """simple docstring""" lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token super().__init__( errors=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , **A_ , ) with open(A_ , encoding='utf-8' ) as vocab_handle: lowerCamelCase_ = json.load(A_ ) lowerCamelCase_ = {v: k for k, v in self.encoder.items()} lowerCamelCase_ = errors # how to handle errors in decoding lowerCamelCase_ = bytes_to_unicode() lowerCamelCase_ = {v: k for k, v in self.byte_encoder.items()} with open(A_ , encoding='utf-8' ) as merges_handle: lowerCamelCase_ = merges_handle.read().split('\n' )[1:-1] lowerCamelCase_ = [tuple(merge.split() ) for merge in bpe_merges] lowerCamelCase_ = dict(zip(A_ , range(len(A_ ) ) ) ) lowerCamelCase_ = {} lowerCamelCase_ = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCamelCase_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def a__ ( self : Optional[Any] ) -> Dict: """simple docstring""" return len(self.encoder ) def a__ ( self : List[Any] ) -> Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def a__ ( self : Tuple , A_ : Tuple ) -> Optional[Any]: """simple docstring""" if token in self.cache: return self.cache[token] lowerCamelCase_ = tuple(A_ ) lowerCamelCase_ = get_pairs(A_ ) if not pairs: return token while True: lowerCamelCase_ = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase_ , lowerCamelCase_ = bigram lowerCamelCase_ = [] lowerCamelCase_ = 0 while i < len(A_ ): try: lowerCamelCase_ = word.index(A_ , A_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCamelCase_ = j if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCamelCase_ = tuple(A_ ) lowerCamelCase_ = new_word if len(A_ ) == 1: break else: lowerCamelCase_ = get_pairs(A_ ) lowerCamelCase_ = ' '.join(A_ ) lowerCamelCase_ = word return word def a__ ( self : str , A_ : List[str] ) -> List[str]: """simple docstring""" lowerCamelCase_ = [] for token in re.findall(self.pat , A_ ): lowerCamelCase_ = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A_ ).split(' ' ) ) return bpe_tokens def a__ ( self : Tuple , A_ : str ) -> Optional[Any]: """simple docstring""" return self.encoder.get(A_ , self.encoder.get(self.unk_token ) ) def a__ ( self : Tuple , A_ : Dict ) -> List[Any]: """simple docstring""" return self.decoder.get(A_ ) def a__ ( self : Optional[int] , A_ : List[Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = ''.join(A_ ) lowerCamelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def a__ ( self : Tuple , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(A_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCamelCase_ = os.path.join( A_ , 
(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) lowerCamelCase_ = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(A_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' ) lowerCamelCase_ = 0 with open(A_ , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A_ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ' Please check that the tokenizer is not corrupted!' ) lowerCamelCase_ = token_index writer.write(' '.join(A_ ) + '\n' ) index += 1 return vocab_file, merge_file def a__ ( self : str , A_ : List[int] , A_ : Optional[List[int]] = None , A_ : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ ) if token_ids_a is None: return [1] + ([0] * len(A_ )) + [1] return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1] def a__ ( self : int , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowerCamelCase_ = [self.sep_token_id] lowerCamelCase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a__ ( self : str , A_ : Optional[Any] , A_ : Union[str, Any]=False , **A_ : List[str] ) -> List[Any]: """simple docstring""" lowerCamelCase_ = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(A_ ) > 0 and not text[0].isspace()): lowerCamelCase_ = ' ' + text return (text, kwargs) def a__ ( self : List[Any] , A_ : List[int] , A_ : Optional[List[int]] = None ) -> Dict: """simple docstring""" return token_ids_a + [self.eos_token_id] def a__ ( self : Optional[int] , A_ : "Conversation" ) -> List[int]: """simple docstring""" lowerCamelCase_ = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(A_ ) lowerCamelCase_ = ' '.join(A_ ) lowerCamelCase_ = self.encode(A_ ) if len(A_ ) > self.model_max_length: lowerCamelCase_ = input_ids[-self.model_max_length :] logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
651
0
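For reference, a compact version of the jump search above with readable names; the sqrt(n) block jumps followed by a linear scan are the same, only the names are mine:

import math

def jump_search(arr: list, target) -> int:
    """Return the index of target in sorted arr, or -1, in O(sqrt(n)) comparisons."""
    n = len(arr)
    step = int(math.sqrt(n))
    prev = 0
    # Jump ahead in blocks of sqrt(n) until reaching the block that could hold target.
    while prev < n and arr[min(prev + step, n) - 1] < target:
        prev += step
    # Linear scan inside that block.
    for i in range(prev, min(prev + step, n)):
        if arr[i] == target:
            return i
    return -1

assert jump_search([1, 3, 5, 7, 9, 11], 9) == 4
assert jump_search([1, 3, 5], 4) == -1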
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int] , lowercase : List[str] ): '''Binary (double-and-add) multiplication: computes a * b using O(log b) additions by scanning the bits of b.''' lowerCamelCase_ = 0 while b > 0: if b & 1: # low bit of b is set: add the current doubled copy of a res += a a += a # double a for the next bit of b b >>= 1 return res def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : str , lowercase : Optional[int] ): '''Modular variant: computes (a * b) % c, reducing res modulo c after every addition.''' lowerCamelCase_ = 0 while b > 0: if b & 1: lowerCamelCase_ = ((res % c) + (a % c)) % c a += a b >>= 1 return res
703
lowerCamelCase : Dict = "Alexander Joslin" import operator as op from .stack import Stack def _SCREAMING_SNAKE_CASE ( lowercase : str ): '''simple docstring''' lowerCamelCase_ = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub} lowerCamelCase_ = Stack() lowerCamelCase_ = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(lowercase ) ) elif i in operators: # RULE 2 operator_stack.push(lowercase ) elif i == ")": # RULE 4 lowerCamelCase_ = operator_stack.peek() operator_stack.pop() lowerCamelCase_ = operand_stack.peek() operand_stack.pop() lowerCamelCase_ = operand_stack.peek() operand_stack.pop() lowerCamelCase_ = operators[opr](lowercase , lowercase ) operand_stack.push(lowercase ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": lowerCamelCase : Any = "(5 + ((4 * 2) * (2 + 3)))" # answer = 45 print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
651
0
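The two-stack evaluator above imports a local Stack class; here is a self-contained sketch of the same algorithm using plain Python lists as stacks. It handles fully parenthesized expressions with single-digit operands, exactly the input shape the snippet assumes:

import operator as op

OPS = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

def two_stack_eval(expr: str) -> float:
    operands, operators = [], []
    for ch in expr:
        if ch.isdigit():
            operands.append(int(ch))      # Rule 1: push operands
        elif ch in OPS:
            operators.append(ch)          # Rule 2: push operators
        elif ch == ")":                   # Rule 4: reduce one parenthesized sub-expression
            fn = OPS[operators.pop()]
            right = operands.pop()
            left = operands.pop()
            operands.append(fn(left, right))
    return operands[-1]                   # Rule 5: the result is left on the operand stack

assert two_stack_eval("(5 + ((4 * 2) * (2 + 3)))") == 45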
import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = ArgumentParser( description=( 'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes' ) ) # Optional arguments for the launch helper parser.add_argument('--num_cores' , type=_lowerCamelCase , default=1 , help='Number of TPU cores to use (1 or 8).' ) # positional parser.add_argument( 'training_script' , type=_lowerCamelCase , help=( 'The full path to the single TPU training ' 'program/script to be launched in parallel, ' 'followed by all the arguments for the ' 'training script' ) , ) # rest from the training program parser.add_argument('training_script_args' , nargs=_lowerCamelCase ) return parser.parse_args() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = parse_args() # Import training_script as a module. lowerCamelCase_ = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) lowerCamelCase_ = script_fpath.stem lowerCamelCase_ = importlib.import_module(_lowerCamelCase ) # Patch sys.argv lowerCamelCase_ = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
704
def _SCREAMING_SNAKE_CASE ( lowercase : list[int] , lowercase : list[int] ): '''simple docstring''' lowerCamelCase_ = len(lowercase ) print('The following activities are selected:' ) # The first activity is always selected lowerCamelCase_ = 0 print(lowercase , end=',' ) # Consider rest of the activities for j in range(lowercase ): # If this activity has start time greater than # or equal to the finish time of previously # selected activity, then select it if start[j] >= finish[i]: print(lowercase , end=',' ) lowerCamelCase_ = j if __name__ == "__main__": import doctest doctest.testmod() lowerCamelCase : Tuple = [1, 3, 0, 5, 8, 5] lowerCamelCase : int = [2, 4, 6, 7, 9, 9] print_max_activities(start, finish)
651
0
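A variant of the greedy activity selection above that returns the chosen indices instead of printing them; like the snippet, it assumes the activities are already sorted by finish time:

def select_activities(start: list, finish: list) -> list:
    """Greedily pick the next activity whose start time is >= the finish time of the last one chosen."""
    chosen = [0]  # the first activity is always selected
    last = 0
    for j in range(1, len(start)):
        if start[j] >= finish[last]:
            chosen.append(j)
            last = j
    return chosen

assert select_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]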
import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) lowerCamelCase = { "sample_size": 32, "in_channels": 3, "out_channels": 3, "layers_per_block": 2, "num_class_embeds": 1_000, "block_out_channels": [32, 64], "attention_head_dim": 8, "down_block_types": [ "ResnetDownsampleBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "scale_shift", "upsample_type": "resnet", "downsample_type": "resnet", } lowerCamelCase = { "sample_size": 64, "in_channels": 3, "out_channels": 3, "layers_per_block": 3, "num_class_embeds": 1_000, "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4], "attention_head_dim": 64, "down_block_types": [ "ResnetDownsampleBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "scale_shift", "upsample_type": "resnet", "downsample_type": "resnet", } lowerCamelCase = { "sample_size": 256, "in_channels": 3, "out_channels": 3, "layers_per_block": 2, "num_class_embeds": None, "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4], "attention_head_dim": 64, "down_block_types": [ "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "default", "upsample_type": "resnet", "downsample_type": "resnet", } lowerCamelCase = { "num_train_timesteps": 40, "sigma_min": 0.002, "sigma_max": 80.0, } lowerCamelCase = { "num_train_timesteps": 201, "sigma_min": 0.002, "sigma_max": 80.0, } lowerCamelCase = { "num_train_timesteps": 151, "sigma_min": 0.002, "sigma_max": 80.0, } def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] ): '''simple docstring''' if isinstance(lowercase , lowercase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('boolean value expected' ) def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] , lowercase : List[Any] , lowercase : Tuple , lowercase : Any , lowercase : Tuple=False ): '''simple docstring''' lowerCamelCase_ = checkpoint[f"""{old_prefix}.in_layers.0.weight"""] lowerCamelCase_ = checkpoint[f"""{old_prefix}.in_layers.0.bias"""] lowerCamelCase_ = checkpoint[f"""{old_prefix}.in_layers.2.weight"""] lowerCamelCase_ = checkpoint[f"""{old_prefix}.in_layers.2.bias"""] lowerCamelCase_ = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""] lowerCamelCase_ = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""] lowerCamelCase_ = checkpoint[f"""{old_prefix}.out_layers.0.weight"""] lowerCamelCase_ = checkpoint[f"""{old_prefix}.out_layers.0.bias"""] lowerCamelCase_ = checkpoint[f"""{old_prefix}.out_layers.3.weight"""] lowerCamelCase_ = checkpoint[f"""{old_prefix}.out_layers.3.bias"""] if has_skip: lowerCamelCase_ = checkpoint[f"""{old_prefix}.skip_connection.weight"""] lowerCamelCase_ = checkpoint[f"""{old_prefix}.skip_connection.bias"""] return new_checkpoint def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Optional[Any] , lowercase : Union[str, Any] , lowercase : List[Any] , lowercase : Optional[Any]=None ): '''simple docstring''' lowerCamelCase_ = 
checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 ) lowerCamelCase_ = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 ) lowerCamelCase_ = checkpoint[f"""{old_prefix}.norm.weight"""] lowerCamelCase_ = checkpoint[f"""{old_prefix}.norm.bias"""] lowerCamelCase_ = weight_q.squeeze(-1 ).squeeze(-1 ) lowerCamelCase_ = bias_q.squeeze(-1 ).squeeze(-1 ) lowerCamelCase_ = weight_k.squeeze(-1 ).squeeze(-1 ) lowerCamelCase_ = bias_k.squeeze(-1 ).squeeze(-1 ) lowerCamelCase_ = weight_v.squeeze(-1 ).squeeze(-1 ) lowerCamelCase_ = bias_v.squeeze(-1 ).squeeze(-1 ) lowerCamelCase_ = ( checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 ) ) lowerCamelCase_ = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Tuple ): '''simple docstring''' lowerCamelCase_ = torch.load(lowercase , map_location='cpu' ) lowerCamelCase_ = {} lowerCamelCase_ = checkpoint["""time_embed.0.weight"""] lowerCamelCase_ = checkpoint["""time_embed.0.bias"""] lowerCamelCase_ = checkpoint["""time_embed.2.weight"""] lowerCamelCase_ = checkpoint["""time_embed.2.bias"""] if unet_config["num_class_embeds"] is not None: lowerCamelCase_ = checkpoint["""label_emb.weight"""] lowerCamelCase_ = checkpoint["""input_blocks.0.0.weight"""] lowerCamelCase_ = checkpoint["""input_blocks.0.0.bias"""] lowerCamelCase_ = unet_config["""down_block_types"""] lowerCamelCase_ = unet_config["""layers_per_block"""] lowerCamelCase_ = unet_config["""attention_head_dim"""] lowerCamelCase_ = unet_config["""block_out_channels"""] lowerCamelCase_ = 1 lowerCamelCase_ = channels_list[0] for i, layer_type in enumerate(lowercase ): lowerCamelCase_ = channels_list[i] lowerCamelCase_ = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(lowercase ): lowerCamelCase_ = f"""down_blocks.{i}.resnets.{j}""" lowerCamelCase_ = f"""input_blocks.{current_layer}.0""" lowerCamelCase_ = True if j == 0 and downsample_block_has_skip else False lowerCamelCase_ = convert_resnet(lowercase , lowercase , lowercase , lowercase , has_skip=lowercase ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(lowercase ): lowerCamelCase_ = f"""down_blocks.{i}.resnets.{j}""" lowerCamelCase_ = f"""input_blocks.{current_layer}.0""" lowerCamelCase_ = True if j == 0 and downsample_block_has_skip else False lowerCamelCase_ = convert_resnet(lowercase , lowercase , lowercase , lowercase , has_skip=lowercase ) lowerCamelCase_ = f"""down_blocks.{i}.attentions.{j}""" lowerCamelCase_ = f"""input_blocks.{current_layer}.1""" lowerCamelCase_ = convert_attention( lowercase , lowercase , lowercase , lowercase , lowercase ) current_layer += 1 if i != len(lowercase ) - 1: lowerCamelCase_ = f"""down_blocks.{i}.downsamplers.0""" lowerCamelCase_ = f"""input_blocks.{current_layer}.0""" lowerCamelCase_ = convert_resnet(lowercase , lowercase , lowercase , lowercase ) current_layer += 1 lowerCamelCase_ = current_channels # hardcoded the mid-block for now lowerCamelCase_ = """mid_block.resnets.0""" lowerCamelCase_ = """middle_block.0""" lowerCamelCase_ = convert_resnet(lowercase , lowercase , lowercase , lowercase ) lowerCamelCase_ = """mid_block.attentions.0""" lowerCamelCase_ = """middle_block.1""" lowerCamelCase_ = convert_attention(lowercase , lowercase , lowercase , lowercase , lowercase ) lowerCamelCase_ = """mid_block.resnets.1""" lowerCamelCase_ = """middle_block.2""" lowerCamelCase_ = convert_resnet(lowercase , lowercase , 
lowercase , lowercase ) lowerCamelCase_ = 0 lowerCamelCase_ = unet_config["""up_block_types"""] for i, layer_type in enumerate(lowercase ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): lowerCamelCase_ = f"""up_blocks.{i}.resnets.{j}""" lowerCamelCase_ = f"""output_blocks.{current_layer}.0""" lowerCamelCase_ = convert_resnet(lowercase , lowercase , lowercase , lowercase , has_skip=lowercase ) current_layer += 1 if i != len(lowercase ) - 1: lowerCamelCase_ = f"""up_blocks.{i}.upsamplers.0""" lowerCamelCase_ = f"""output_blocks.{current_layer-1}.1""" lowerCamelCase_ = convert_resnet(lowercase , lowercase , lowercase , lowercase ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): lowerCamelCase_ = f"""up_blocks.{i}.resnets.{j}""" lowerCamelCase_ = f"""output_blocks.{current_layer}.0""" lowerCamelCase_ = convert_resnet(lowercase , lowercase , lowercase , lowercase , has_skip=lowercase ) lowerCamelCase_ = f"""up_blocks.{i}.attentions.{j}""" lowerCamelCase_ = f"""output_blocks.{current_layer}.1""" lowerCamelCase_ = convert_attention( lowercase , lowercase , lowercase , lowercase , lowercase ) current_layer += 1 if i != len(lowercase ) - 1: lowerCamelCase_ = f"""up_blocks.{i}.upsamplers.0""" lowerCamelCase_ = f"""output_blocks.{current_layer-1}.2""" lowerCamelCase_ = convert_resnet(lowercase , lowercase , lowercase , lowercase ) lowerCamelCase_ = checkpoint["""out.0.weight"""] lowerCamelCase_ = checkpoint["""out.0.bias"""] lowerCamelCase_ = checkpoint["""out.2.weight"""] lowerCamelCase_ = checkpoint["""out.2.bias"""] return new_checkpoint if __name__ == "__main__": lowerCamelCase = argparse.ArgumentParser() parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.") parser.add_argument( "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model." ) parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.") lowerCamelCase = parser.parse_args() lowerCamelCase = strabool(args.class_cond) lowerCamelCase = os.path.basename(args.unet_path) print(F"""Checkpoint: {ckpt_name}""") # Get U-Net config if "imagenet64" in ckpt_name: lowerCamelCase = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): lowerCamelCase = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: lowerCamelCase = TEST_UNET_CONFIG else: raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""") if not args.class_cond: lowerCamelCase = None lowerCamelCase = con_pt_to_diffuser(args.unet_path, unet_config) lowerCamelCase = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: lowerCamelCase = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: lowerCamelCase = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): lowerCamelCase = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""") lowerCamelCase = CMStochasticIterativeScheduler(**scheduler_config) lowerCamelCase = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
705
import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A: '''simple docstring''' def __init__( self : Optional[Any] , A_ : Union[str, Any] , A_ : str=13 , A_ : List[Any]=32 , A_ : Tuple=2 , A_ : Dict=3 , A_ : Union[str, Any]=16 , A_ : List[str]=[32, 64, 128] , A_ : Optional[Any]=[1, 2, 1] , A_ : Tuple=[2, 2, 4] , A_ : Dict=2 , A_ : Optional[Any]=2.0 , A_ : List[str]=True , A_ : Dict=0.0 , A_ : List[str]=0.0 , A_ : Optional[int]=0.1 , A_ : str="gelu" , A_ : Optional[Any]=False , A_ : Any=True , A_ : Optional[Any]=0.02 , A_ : Dict=1E-5 , A_ : int=True , A_ : Optional[int]=None , A_ : List[str]=True , A_ : Tuple=10 , A_ : Any=8 , A_ : Dict=["stage1", "stage2"] , A_ : Optional[Any]=[1, 2] , ) -> List[str]: """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = image_size lowerCamelCase_ = patch_size lowerCamelCase_ = num_channels lowerCamelCase_ = embed_dim lowerCamelCase_ = hidden_sizes lowerCamelCase_ = depths lowerCamelCase_ = num_heads lowerCamelCase_ = window_size lowerCamelCase_ = mlp_ratio lowerCamelCase_ = qkv_bias lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = drop_path_rate lowerCamelCase_ = hidden_act lowerCamelCase_ = use_absolute_embeddings lowerCamelCase_ = patch_norm lowerCamelCase_ = layer_norm_eps lowerCamelCase_ = initializer_range lowerCamelCase_ = is_training lowerCamelCase_ = scope lowerCamelCase_ = use_labels lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = encoder_stride lowerCamelCase_ = out_features lowerCamelCase_ = out_indices def a__ ( self : List[str] ) -> List[Any]: """simple docstring""" lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = self.get_config() return config, pixel_values, labels def a__ ( self : List[Any] ) -> Any: """simple docstring""" return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , 
out_features=self.out_features , out_indices=self.out_indices , ) def a__ ( self : Union[str, Any] , A_ : Dict , A_ : int , A_ : Optional[int] ) -> List[str]: """simple docstring""" lowerCamelCase_ = FocalNetModel(config=A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ ) lowerCamelCase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCamelCase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def a__ ( self : Tuple , A_ : List[str] , A_ : Optional[int] , A_ : Optional[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ = FocalNetBackbone(config=A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None lowerCamelCase_ = None lowerCamelCase_ = FocalNetBackbone(config=A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def a__ ( self : int , A_ : Optional[Any] , A_ : Optional[int] , A_ : Any ) -> Any: """simple docstring""" lowerCamelCase_ = FocalNetForMaskedImageModeling(config=A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCamelCase_ = 1 lowerCamelCase_ = FocalNetForMaskedImageModeling(A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase_ = model(A_ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def a__ ( self : Tuple , A_ : List[Any] , A_ : int , A_ : Dict ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = self.type_sequence_label_size lowerCamelCase_ = FocalNetForImageClassification(A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = model(A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase_ = 1 lowerCamelCase_ = FocalNetForImageClassification(A_ ) model.to(A_ ) model.eval() lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase_ = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def a__ ( self : int ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs lowerCamelCase_ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, 
FocalNetBackbone, ) if is_torch_available() else () ) UpperCamelCase = ( {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification} if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def a__ ( self : List[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ = FocalNetModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=A_ , embed_dim=37 , has_text_modality=A_ ) def a__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a__ ( self : Any ) -> Optional[int]: """simple docstring""" return def a__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def a__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*A_ ) def a__ ( self : Dict ) -> int: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*A_ ) def a__ ( self : List[str] ) -> Any: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A_ ) @unittest.skip(reason='FocalNet does not use inputs_embeds' ) def a__ ( self : int ) -> int: """simple docstring""" pass @unittest.skip(reason='FocalNet does not use feedforward chunking' ) def a__ ( self : Tuple ) -> List[str]: """simple docstring""" pass def a__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowerCamelCase_ = model_class(A_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A_ , nn.Linear ) ) def a__ ( self : Any ) -> Optional[int]: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowerCamelCase_ = model_class(A_ ) lowerCamelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ = [*signature.parameters.keys()] lowerCamelCase_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , A_ ) def a__ ( self : int , A_ : List[Any] , A_ : int , A_ : Dict , A_ : Dict ) -> List[Any]: """simple docstring""" lowerCamelCase_ = model_class(A_ ) model.to(A_ ) model.eval() with torch.no_grad(): lowerCamelCase_ = model(**self._prepare_for_class(A_ , A_ ) ) lowerCamelCase_ = outputs.hidden_states lowerCamelCase_ = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(A_ ) , A_ ) # FocalNet has a different seq_length lowerCamelCase_ = ( config.patch_size if isinstance(config.patch_size , 
collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) lowerCamelCase_ = outputs.reshaped_hidden_states self.assertEqual(len(A_ ) , A_ ) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = reshaped_hidden_states[0].shape lowerCamelCase_ = ( reshaped_hidden_states[0].view(A_ , A_ , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def a__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: lowerCamelCase_ = True self.check_hidden_states_output(A_ , A_ , A_ , A_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ = True self.check_hidden_states_output(A_ , A_ , A_ , A_ ) def a__ ( self : List[str] ) -> Dict: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = 3 lowerCamelCase_ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCamelCase_ = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCamelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: lowerCamelCase_ = True self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ = True self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) ) @slow def a__ ( self : str ) -> Optional[Any]: """simple docstring""" for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = FocalNetModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def a__ ( self : List[Any] ) -> Tuple: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = _config_zero_init(A_ ) for model_class in self.all_model_classes: lowerCamelCase_ = model_class(config=A_ ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @require_vision @require_torch class A( unittest.TestCase ): '''simple docstring''' @cached_property def a__ ( self : List[str] ) -> Optional[int]: """simple docstring""" return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None @slow def a__ ( self : Tuple ) -> Any: """simple docstring""" lowerCamelCase_ = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(A_ ) lowerCamelCase_ = 
self.default_image_processor lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) lowerCamelCase_ = image_processor(images=A_ , return_tensors='pt' ).to(A_ ) # forward pass with torch.no_grad(): lowerCamelCase_ = model(**A_ ) # verify the logits lowerCamelCase_ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , A_ ) lowerCamelCase_ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(A_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 ) @require_torch class A( UpperCamelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase = (FocalNetBackbone,) if is_torch_available() else () UpperCamelCase = FocalNetConfig UpperCamelCase = False def a__ ( self : List[str] ) -> Tuple: """simple docstring""" lowerCamelCase_ = FocalNetModelTester(self )
651
0
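The attention converter in the checkpoint script above splits a fused qkv projection with chunk(3, dim=0). A minimal illustration of that split; the shapes are illustrative, not taken from any particular checkpoint:

import torch

hidden = 64
# Fused projection as stored in the original checkpoint: [3 * hidden, hidden].
qkv_weight = torch.randn(3 * hidden, hidden)
qkv_bias = torch.randn(3 * hidden)

# chunk(3, dim=0) yields the q, k and v pieces in storage order.
w_q, w_k, w_v = qkv_weight.chunk(3, dim=0)
b_q, b_k, b_v = qkv_bias.chunk(3, dim=0)

assert w_q.shape == (hidden, hidden) and b_q.shape == (hidden,)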
import os import sys import unittest lowerCamelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path lowerCamelCase : Optional[Any] = os.path.join(git_repo_path, "src", "transformers") lowerCamelCase : str = "\n{0} = None\n" lowerCamelCase : List[str] = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n" lowerCamelCase : int = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n" class A( unittest.TestCase ): '''simple docstring''' def a__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = find_backend(' _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")' ) self.assertIsNone(a_ ) lowerCamelCase_ = find_backend(' if not is_tokenizers_available():' ) self.assertEqual(a_ , 'tokenizers' ) lowerCamelCase_ = find_backend(' if not is_tensorflow_text_available():' ) self.assertEqual(a_ , 'tensorflow_text' ) lowerCamelCase_ = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' ) self.assertEqual(a_ , 'sentencepiece_and_tokenizers' ) lowerCamelCase_ = find_backend( ' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' ) self.assertEqual(a_ , 'sentencepiece_and_tensorflow_text' ) lowerCamelCase_ = find_backend( ' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' ) self.assertEqual(a_ , 'sentencepiece_and_tokenizers_and_vision' ) def a__ ( self : List[str] ) -> str: """simple docstring""" lowerCamelCase_ = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('torch' , a_ ) self.assertIn('tensorflow_text' , a_ ) self.assertIn('sentencepiece_and_tokenizers' , a_ ) # Likewise, we can't assert on the exact content of a key self.assertIn('BertModel' , objects['torch'] ) self.assertIn('TFBertModel' , objects['tf'] ) self.assertIn('FlaxBertModel' , objects['flax'] ) self.assertIn('BertModel' , objects['torch'] ) self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] ) self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] ) def a__ ( self : Optional[int] ) -> int: """simple docstring""" lowerCamelCase_ = create_dummy_object('CONSTANT' , '\'torch\'' ) self.assertEqual(a_ , '\nCONSTANT = None\n' ) lowerCamelCase_ = create_dummy_object('function' , '\'torch\'' ) self.assertEqual( a_ , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' ) lowerCamelCase_ = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') """ lowerCamelCase_ = create_dummy_object('FakeClass' , '\'torch\'' ) self.assertEqual(a_ , a_ ) def a__ ( self : Optional[Any] ) -> Any: """simple docstring""" lowerCamelCase_ = """# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, [\"torch\"]) class FakeClass(metaclass=DummyObject): _backends = [\"torch\"] def __init__(self, *args, **kwargs): requires_backends(self, [\"torch\"]) """ lowerCamelCase_ = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} ) self.assertEqual(dummy_files['torch'] , a_ )
706
import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class A( unittest.TestCase ): '''simple docstring''' UpperCamelCase = MODEL_FOR_CAUSAL_LM_MAPPING UpperCamelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' ) # Using `do_sample=False` to force deterministic output lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ ) self.assertEqual( A_ , [ { 'generated_text': ( 'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.' ' oscope. FiliFili@@' ) } ] , ) lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] ) self.assertEqual( A_ , [ [ { 'generated_text': ( 'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.' ' oscope. FiliFili@@' ) } ], [ { 'generated_text': ( 'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy' ' oscope. oscope. FiliFili@@' ) } ], ] , ) lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ , num_return_sequences=2 , return_tensors=A_ ) self.assertEqual( A_ , [ {'generated_token_ids': ANY(A_ )}, {'generated_token_ids': ANY(A_ )}, ] , ) lowerCamelCase_ = text_generator.model.config.eos_token_id lowerCamelCase_ = '<pad>' lowerCamelCase_ = text_generator( ['This is a test', 'This is a second test'] , do_sample=A_ , num_return_sequences=2 , batch_size=2 , return_tensors=A_ , ) self.assertEqual( A_ , [ [ {'generated_token_ids': ANY(A_ )}, {'generated_token_ids': ANY(A_ )}, ], [ {'generated_token_ids': ANY(A_ )}, {'generated_token_ids': ANY(A_ )}, ], ] , ) @require_tf def a__ ( self : Optional[int] ) -> str: """simple docstring""" lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' ) # Using `do_sample=False` to force deterministic output lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ ) self.assertEqual( A_ , [ { 'generated_text': ( 'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵' ' please,' ) } ] , ) lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] , do_sample=A_ ) self.assertEqual( A_ , [ [ { 'generated_text': ( 'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵' ' please,' ) } ], [ { 'generated_text': ( 'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes' ' Cannes 閲閲Cannes Cannes Cannes 攵 please,' ) } ], ] , ) def a__ ( self : Optional[int] , A_ : Dict , A_ : int , A_ : List[str] ) -> str: """simple docstring""" lowerCamelCase_ = TextGenerationPipeline(model=A_ , tokenizer=A_ ) return text_generator, ["This is a test", "Another test"] def a__ ( self : Dict ) -> str: """simple docstring""" lowerCamelCase_ = 'Hello I believe in' lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' ) lowerCamelCase_ = text_generator(A_ ) self.assertEqual( A_ , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , ) lowerCamelCase_ = text_generator(A_ , stop_sequence=' fe' ) 
self.assertEqual(A_ , [{'generated_text': 'Hello I believe in fe'}] ) def a__ ( self : Any , A_ : Optional[Any] , A_ : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = text_generator.model lowerCamelCase_ = text_generator.tokenizer lowerCamelCase_ = text_generator('This is a test' ) self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] ) self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) ) lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ ) self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] ) self.assertNotIn('This is a test' , outputs[0]['generated_text'] ) lowerCamelCase_ = pipeline(task='text-generation' , model=A_ , tokenizer=A_ , return_full_text=A_ ) lowerCamelCase_ = text_generator('This is a test' ) self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] ) self.assertNotIn('This is a test' , outputs[0]['generated_text'] ) lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ ) self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] ) self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) ) lowerCamelCase_ = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=A_ ) self.assertEqual( A_ , [ [{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}], [{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}], ] , ) if text_generator.tokenizer.pad_token is not None: lowerCamelCase_ = text_generator( ['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=A_ ) self.assertEqual( A_ , [ [{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}], [{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}], ] , ) with self.assertRaises(A_ ): lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_text=A_ ) with self.assertRaises(A_ ): lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_tensors=A_ ) with self.assertRaises(A_ ): lowerCamelCase_ = text_generator('test' , return_text=A_ , return_tensors=A_ ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): lowerCamelCase_ = text_generator('' ) self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] ) else: with self.assertRaises((ValueError, AssertionError) ): lowerCamelCase_ = text_generator('' ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. 
lowerCamelCase_ = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM'] if ( tokenizer.model_max_length < 10000 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator('This is a test' * 500 , max_new_tokens=20 ) lowerCamelCase_ = text_generator('This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(A_ ): text_generator( 'This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def a__ ( self : Union[str, Any] ) -> Any: """simple docstring""" import torch # Classic `model_kwargs` lowerCamelCase_ = pipeline( model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCamelCase_ = pipe('This is a test' ) self.assertEqual( A_ , [ { 'generated_text': ( 'This is a test test test test test test test test test test test test test test test test' ' test' ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCamelCase_ = pipe('This is a test' ) self.assertEqual( A_ , [ { 'generated_text': ( 'This is a test test test test test test test test test test test test test test test test' ' test' ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) lowerCamelCase_ = pipe('This is a test' ) self.assertEqual( A_ , [ { 'generated_text': ( 'This is a test test test test test test test test test test test test test test test test' ' test' ) } ] , ) @require_torch @require_torch_gpu def a__ ( self : int ) -> str: """simple docstring""" import torch lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.floataa ) pipe('This is a test' ) @require_torch @require_accelerate @require_torch_gpu def a__ ( self : List[Any] ) -> Dict: """simple docstring""" import torch lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.floataa ) pipe('This is a test' , do_sample=A_ , top_p=0.5 ) def a__ ( self : Tuple ) -> Dict: """simple docstring""" lowerCamelCase_ = 'Hello world' lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' ) if text_generator.model.framework == "tf": lowerCamelCase_ = logging.get_logger('transformers.generation.tf_utils' ) else: lowerCamelCase_ = logging.get_logger('transformers.generation.utils' ) lowerCamelCase_ = 'Both `max_new_tokens`' # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(A_ ) as cl: lowerCamelCase_ = text_generator(A_ , 
max_length=10 , max_new_tokens=1 ) self.assertIn(A_ , cl.out ) # The user only sets one -> no warning with CaptureLogger(A_ ) as cl: lowerCamelCase_ = text_generator(A_ , max_new_tokens=1 ) self.assertNotIn(A_ , cl.out ) with CaptureLogger(A_ ) as cl: lowerCamelCase_ = text_generator(A_ , max_length=10 ) self.assertNotIn(A_ , cl.out )
651
0
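The pipeline behaviors exercised by the tests above (deterministic decoding with do_sample=False, prompt stripping with return_full_text=False) follow the standard transformers usage pattern; a minimal sketch against the same tiny test model the tests use:

from transformers import pipeline

generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")

# do_sample=False forces greedy, hence deterministic, decoding.
out = generator("Hello I believe in", do_sample=False, max_new_tokens=5)
print(out[0]["generated_text"])

# return_full_text=False strips the prompt from the returned text.
out = generator("Hello I believe in", do_sample=False, max_new_tokens=5, return_full_text=False)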
import torch from torch import nn class A( nn.Module ): '''simple docstring''' def __init__( self : Any , A_ : Union[str, Any] , A_ : Optional[int] , A_ : str , A_ : Optional[Any] , A_ : Optional[Any]=1 , A_ : List[Any]=False ) -> Dict: """simple docstring""" super().__init__() lowerCamelCase_ = n_token lowerCamelCase_ = d_embed lowerCamelCase_ = d_proj lowerCamelCase_ = cutoffs + [n_token] lowerCamelCase_ = [0] + self.cutoffs lowerCamelCase_ = div_val lowerCamelCase_ = self.cutoffs[0] lowerCamelCase_ = len(self.cutoffs ) - 1 lowerCamelCase_ = self.shortlist_size + self.n_clusters if self.n_clusters > 0: lowerCamelCase_ = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) ) lowerCamelCase_ = nn.Parameter(torch.zeros(self.n_clusters ) ) lowerCamelCase_ = nn.ModuleList() lowerCamelCase_ = nn.ParameterList() if div_val == 1: for i in range(len(self.cutoffs ) ): if d_proj != d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(__lowerCAmelCase , __lowerCAmelCase ) ) ) else: self.out_projs.append(__lowerCAmelCase ) self.out_layers.append(nn.Linear(__lowerCAmelCase , __lowerCAmelCase ) ) else: for i in range(len(self.cutoffs ) ): lowerCamelCase_ , lowerCamelCase_ = self.cutoff_ends[i], self.cutoff_ends[i + 1] lowerCamelCase_ = d_embed // (div_val**i) self.out_projs.append(nn.Parameter(torch.FloatTensor(__lowerCAmelCase , __lowerCAmelCase ) ) ) self.out_layers.append(nn.Linear(__lowerCAmelCase , r_idx - l_idx ) ) lowerCamelCase_ = keep_order def a__ ( self : List[str] , A_ : List[Any] , A_ : int , A_ : Any , A_ : Dict ) -> Optional[Any]: """simple docstring""" if proj is None: lowerCamelCase_ = nn.functional.linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase ) else: # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1: lowerCamelCase_ = nn.functional.linear(__lowerCAmelCase , proj.t().contiguous() ) lowerCamelCase_ = nn.functional.linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase ) # else: # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) # if bias is not None: # logit = logit + bias return logit def a__ ( self : Union[str, Any] , A_ : Tuple , A_ : int=None , A_ : Optional[Any]=False ) -> Optional[int]: """simple docstring""" if labels is not None: # Shift so that tokens < n predict n lowerCamelCase_ = hidden[..., :-1, :].contiguous() lowerCamelCase_ = labels[..., 1:].contiguous() lowerCamelCase_ = hidden.view(-1 , hidden.size(-1 ) ) lowerCamelCase_ = labels.view(-1 ) if hidden.size(0 ) != labels.size(0 ): raise RuntimeError('Input and labels should have the same size in the batch dimension.' 
) else: lowerCamelCase_ = hidden.view(-1 , hidden.size(-1 ) ) if self.n_clusters == 0: lowerCamelCase_ = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) if labels is not None: lowerCamelCase_ = labels != -100 lowerCamelCase_ = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device ) lowerCamelCase_ = ( -nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 ) ) else: lowerCamelCase_ = nn.functional.log_softmax(__lowerCAmelCase , dim=-1 ) else: # construct weights and biases lowerCamelCase_ , lowerCamelCase_ = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: lowerCamelCase_ , lowerCamelCase_ = self.cutoff_ends[i], self.cutoff_ends[i + 1] lowerCamelCase_ = self.out_layers[0].weight[l_idx:r_idx] lowerCamelCase_ = self.out_layers[0].bias[l_idx:r_idx] else: lowerCamelCase_ = self.out_layers[i].weight lowerCamelCase_ = self.out_layers[i].bias if i == 0: lowerCamelCase_ = torch.cat([weight_i, self.cluster_weight] , dim=0 ) lowerCamelCase_ = torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(__lowerCAmelCase ) biases.append(__lowerCAmelCase ) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = weights[0], biases[0], self.out_projs[0] lowerCamelCase_ = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase_ = nn.functional.log_softmax(__lowerCAmelCase , dim=1 ) if labels is None: lowerCamelCase_ = hidden.new_empty((head_logit.size(0 ), self.n_token) ) else: lowerCamelCase_ = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device ) lowerCamelCase_ = 0 lowerCamelCase_ = [0] + self.cutoffs for i in range(len(__lowerCAmelCase ) - 1 ): lowerCamelCase_ , lowerCamelCase_ = cutoff_values[i], cutoff_values[i + 1] if labels is not None: lowerCamelCase_ = (labels >= l_idx) & (labels < r_idx) lowerCamelCase_ = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue lowerCamelCase_ = labels.index_select(0 , __lowerCAmelCase ) - l_idx lowerCamelCase_ = head_logprob.index_select(0 , __lowerCAmelCase ) lowerCamelCase_ = hidden.index_select(0 , __lowerCAmelCase ) else: lowerCamelCase_ = hidden if i == 0: if labels is not None: lowerCamelCase_ = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 ) else: lowerCamelCase_ = head_logprob[:, : self.cutoffs[0]] else: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = weights[i], biases[i], self.out_projs[i] lowerCamelCase_ = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase_ = nn.functional.log_softmax(__lowerCAmelCase , dim=1 ) lowerCamelCase_ = self.cutoffs[0] + i - 1 # No probability for the head cluster if labels is not None: lowerCamelCase_ = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather( 1 , target_i[:, None] ).squeeze(1 ) else: lowerCamelCase_ = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i lowerCamelCase_ = logprob_i if labels is not None: if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order: out.index_copy_(0 , __lowerCAmelCase , -logprob_i ) else: out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i ) offset += logprob_i.size(0 ) return out def a__ ( self : int , A_ : Union[str, Any] ) -> int: """simple docstring""" if self.n_clusters == 0: lowerCamelCase_ = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) return 
nn.functional.log_softmax(__lowerCAmelCase , dim=-1 ) else: # construct weights and biases lowerCamelCase_ , lowerCamelCase_ = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: lowerCamelCase_ , lowerCamelCase_ = self.cutoff_ends[i], self.cutoff_ends[i + 1] lowerCamelCase_ = self.out_layers[0].weight[l_idx:r_idx] lowerCamelCase_ = self.out_layers[0].bias[l_idx:r_idx] else: lowerCamelCase_ = self.out_layers[i].weight lowerCamelCase_ = self.out_layers[i].bias if i == 0: lowerCamelCase_ = torch.cat([weight_i, self.cluster_weight] , dim=0 ) lowerCamelCase_ = torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(__lowerCAmelCase ) biases.append(__lowerCAmelCase ) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = weights[0], biases[0], self.out_projs[0] lowerCamelCase_ = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase_ = hidden.new_empty((head_logit.size(0 ), self.n_token) ) lowerCamelCase_ = nn.functional.log_softmax(__lowerCAmelCase , dim=1 ) lowerCamelCase_ = [0] + self.cutoffs for i in range(len(__lowerCAmelCase ) - 1 ): lowerCamelCase_ , lowerCamelCase_ = cutoff_values[i], cutoff_values[i + 1] if i == 0: lowerCamelCase_ = head_logprob[:, : self.cutoffs[0]] else: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = weights[i], biases[i], self.out_projs[i] lowerCamelCase_ = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase_ = nn.functional.log_softmax(__lowerCAmelCase , dim=1 ) lowerCamelCase_ = head_logprob[:, -i] + tail_logprob_i lowerCamelCase_ = logprob_i return out
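The class above is recognizably a projected adaptive log-softmax head (the Transformer-XL style output layer). PyTorch ships a built-in variant of the same idea; the sketch below uses it with made-up sizes to show the two code paths of the method above: target log-likelihood when labels are given, and a full log-probability table otherwise.

import torch
from torch import nn

# 64-dim projected hidden states, 1000-word vocabulary, clusters cut at 100/500.
head = nn.AdaptiveLogSoftmaxWithLoss(in_features=64, n_classes=1000, cutoffs=[100, 500], div_value=4.0)
hidden = torch.randn(32, 64)
targets = torch.randint(0, 1000, (32,))
result = head(hidden, targets)    # like the `labels is not None` branch: per-target log-probs + loss
logprobs = head.log_prob(hidden)  # like the final method: full (32, 1000) log-probability table
print(result.loss.item(), logprobs.shape)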
707
import os import re import shutil import sys import tempfile import unittest import black lowerCamelCase : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. lowerCamelCase : Tuple = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n" class A( unittest.TestCase ): '''simple docstring''' def a__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" lowerCamelCase_ = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) ) lowerCamelCase_ = self.diffusers_dir shutil.copy( os.path.join(A_ , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , ) def a__ ( self : Tuple ) -> List[str]: """simple docstring""" lowerCamelCase_ = 'src/diffusers' shutil.rmtree(self.diffusers_dir ) def a__ ( self : str , A_ : Optional[Any] , A_ : Optional[int] , A_ : str , A_ : Optional[Any]=None ) -> int: """simple docstring""" lowerCamelCase_ = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: lowerCamelCase_ = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result lowerCamelCase_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) lowerCamelCase_ = black.format_str(A_ , mode=A_ ) lowerCamelCase_ = os.path.join(self.diffusers_dir , 'new_code.py' ) with open(A_ , 'w' , newline='\n' ) as f: f.write(A_ ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(A_ ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=A_ ) with open(A_ , 'r' ) as f: self.assertTrue(f.read() , A_ ) def a__ ( self : Optional[int] ) -> Dict: """simple docstring""" lowerCamelCase_ = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' ) self.assertEqual(A_ , A_ ) def a__ ( self : Any ) -> Dict: """simple docstring""" self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , ) # With no empty line at the end self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , A_ , ) # Copy consistency with rename self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , A_ ) , ) # Copy consistency with a really long name lowerCamelCase_ = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason' self.check_copy_consistency( f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with 
DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('Bert' , A_ , A_ ) , ) # Copy consistency with overwrite self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , A_ , overwrite_result=re.sub('DDPM' , 'Test' , A_ ) , )
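A minimal sketch of the mechanism this test suite exercises: a `# Copied from ... with A->B` comment pins a code block to its source modulo declared renames. `is_copy_consistent` below is an illustrative stand-in, not the real `check_copies` helper.

import re
from typing import Dict, Optional

def is_copy_consistent(reference: str, candidate: str, renames: Optional[Dict[str, str]] = None) -> bool:
    # Apply the declared renames (the `with DDPM->Test` clause), then require
    # an exact match with the copied block.
    expected = reference
    for old, new in (renames or {}).items():
        expected = re.sub(old, new, expected)
    return expected.strip() == candidate.strip()

REF = 'class DDPMSchedulerOutput:\n    prev_sample = None'
assert is_copy_consistent(REF, REF)
assert is_copy_consistent(REF, REF.replace('DDPM', 'Test'), renames={'DDPM': 'Test'})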
651
0
import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int lowerCamelCase : str = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class A( datasets.BuilderConfig ): '''simple docstring''' UpperCamelCase = None def _SCREAMING_SNAKE_CASE ( lowercase : "pyspark.sql.DataFrame" , lowercase : List[int] , ): '''simple docstring''' import pyspark def generate_fn(): lowerCamelCase_ = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) ) for partition_id in partition_order: lowerCamelCase_ = df_with_partition_id.select('*' ).where(f"""part_id = {partition_id}""" ).drop('part_id' ) lowerCamelCase_ = partition_df.collect() lowerCamelCase_ = 0 for row in rows: yield f"""{partition_id}_{row_id}""", row.asDict() row_id += 1 return generate_fn class A( _BaseExamplesIterable ): '''simple docstring''' def __init__( self : int , A_ : "pyspark.sql.DataFrame" , A_ : Optional[int]=None , ) -> List[str]: """simple docstring""" lowerCamelCase_ = df lowerCamelCase_ = partition_order or range(self.df.rdd.getNumPartitions() ) lowerCamelCase_ = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self : List[Any] ) -> str: """simple docstring""" yield from self.generate_examples_fn() def a__ ( self : Optional[int] , A_ : np.random.Generator ) -> List[Any]: """simple docstring""" lowerCamelCase_ = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(lowercase_ ) return SparkExamplesIterable(self.df , partition_order=lowercase_ ) def a__ ( self : Optional[int] , A_ : int , A_ : int ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = self.split_shard_indices_by_worker(lowercase_ , lowercase_ ) return SparkExamplesIterable(self.df , partition_order=lowercase_ ) @property def a__ ( self : List[str] ) -> Dict: """simple docstring""" return len(self.partition_order ) class A( datasets.DatasetBuilder ): '''simple docstring''' UpperCamelCase = SparkConfig def __init__( self : Tuple , A_ : "pyspark.sql.DataFrame" , A_ : str = None , A_ : str = None , **A_ : str , ) -> Optional[Any]: """simple docstring""" import pyspark lowerCamelCase_ = pyspark.sql.SparkSession.builder.getOrCreate() lowerCamelCase_ = df lowerCamelCase_ = working_dir super().__init__( cache_dir=lowercase_ , config_name=str(self.df.semanticHash() ) , **lowercase_ , ) def a__ ( self : str ) -> str: """simple docstring""" def create_cache_and_write_probe(A_ : str ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=lowercase_ ) lowerCamelCase_ = os.path.join(self._cache_dir , 'fs_test' + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(lowercase_ , 'a' ) return [probe_file] if self._spark.conf.get('spark.master' , '' ).startswith('local' ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. 
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: lowerCamelCase_ = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowercase_ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( 'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' ) def a__ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def a__ ( self : List[Any] , A_ : datasets.download.download_manager.DownloadManager ) -> Tuple: """simple docstring""" return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def a__ ( self : List[str] , A_ : Union[str, Any] ) -> Dict: """simple docstring""" import pyspark def get_arrow_batch_size(A_ : Any ): for batch in it: yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} ) lowerCamelCase_ = self.df.count() lowerCamelCase_ = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. lowerCamelCase_ = ( self.df.limit(lowercase_ ) .repartition(1 ) .mapInArrow(lowercase_ , 'batch_bytes: long' ) .agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) ) .collect()[0] .sample_bytes / sample_num_rows ) lowerCamelCase_ = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. lowerCamelCase_ = min(lowercase_ , int(approx_total_size / max_shard_size ) ) lowerCamelCase_ = self.df.repartition(lowercase_ ) def a__ ( self : Any , A_ : str , A_ : str , A_ : int , ) -> Dict: """simple docstring""" import pyspark lowerCamelCase_ = ParquetWriter if file_format == """parquet""" else ArrowWriter lowerCamelCase_ = os.path.join(self._working_dir , os.path.basename(lowercase_ ) ) if self._working_dir else fpath lowerCamelCase_ = file_format == """parquet""" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. lowerCamelCase_ = self.config.features lowerCamelCase_ = self._writer_batch_size lowerCamelCase_ = self._fs.storage_options def write_arrow(A_ : str ): # Within the same SparkContext, no two task attempts will share the same attempt ID. lowerCamelCase_ = pyspark.TaskContext().taskAttemptId() lowerCamelCase_ = next(lowercase_ , lowercase_ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , ) lowerCamelCase_ = 0 lowerCamelCase_ = writer_class( features=lowercase_ , path=working_fpath.replace('SSSSS' , f"""{shard_id:05d}""" ).replace('TTTTT' , f"""{task_id:05d}""" ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , ) lowerCamelCase_ = pa.Table.from_batches([first_batch] ) writer.write_table(lowercase_ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: lowerCamelCase_ = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , ) shard_id += 1 lowerCamelCase_ = writer_class( features=writer._features , path=working_fpath.replace('SSSSS' , f"""{shard_id:05d}""" ).replace('TTTTT' , f"""{task_id:05d}""" ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , ) lowerCamelCase_ = pa.Table.from_batches([batch] ) writer.write_table(lowercase_ ) if writer._num_bytes > 0: lowerCamelCase_ = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(lowercase_ ) ): lowerCamelCase_ = os.path.join(os.path.dirname(lowercase_ ) , os.path.basename(lowercase_ ) ) shutil.move(lowercase_ , lowercase_ ) lowerCamelCase_ = ( self.df.mapInArrow(lowercase_ , 'task_id: long, num_examples: long, num_bytes: long' ) .groupBy('task_id' ) .agg( pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def a__ ( self : Dict , A_ : "datasets.SplitGenerator" , A_ : str = "arrow" , A_ : Optional[Union[str, int]] = None , A_ : Optional[int] = None , **A_ : List[str] , ) -> Optional[Any]: """simple docstring""" self._validate_cache_dir() lowerCamelCase_ = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(lowercase_ ) lowerCamelCase_ = not is_remote_filesystem(self._fs ) lowerCamelCase_ = os.path.join if is_local else posixpath.join lowerCamelCase_ = """-TTTTT-SSSSS-of-NNNNN""" lowerCamelCase_ = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}""" lowerCamelCase_ = path_join(self._output_dir , lowercase_ ) lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = [] lowerCamelCase_ = [] for task_id, content in self._prepare_split_single(lowercase_ , lowercase_ , lowercase_ ): ( lowerCamelCase_ ) = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(lowercase_ ) lowerCamelCase_ = total_num_examples lowerCamelCase_ = total_num_bytes # should rename everything at the end logger.debug(f"""Renaming {total_shards} shards.""" ) if total_shards > 1: lowerCamelCase_ = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. 
lowerCamelCase_ = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( A_ : int , A_ : int , A_ : int , ): rename( lowercase_ , fpath.replace('SSSSS' , f"""{shard_id:05d}""" ).replace('TTTTT' , f"""{task_id:05d}""" ) , fpath.replace('TTTTT-SSSSS' , f"""{global_shard_id:05d}""" ).replace('NNNNN' , f"""{total_shards:05d}""" ) , ) lowerCamelCase_ = [] lowerCamelCase_ = 0 for i in range(len(lowercase_ ) ): lowerCamelCase_ = task_id_and_num_shards[i] for shard_id in range(lowercase_ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(lowercase_ , len(lowercase_ ) ).map(lambda A_ : _rename_shard(*lowercase_ ) ).collect() else: # don't use any pattern lowerCamelCase_ = 0 lowerCamelCase_ = task_id_and_num_shards[0][0] self._rename( fpath.replace('SSSSS' , f"""{shard_id:05d}""" ).replace('TTTTT' , f"""{task_id:05d}""" ) , fpath.replace(lowercase_ , '' ) , ) def a__ ( self : Union[str, Any] , A_ : "datasets.SplitGenerator" , ) -> Optional[Any]: """simple docstring""" return SparkExamplesIterable(self.df )
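How this builder is typically reached in practice, sketched under the assumption of a local Spark session and `datasets>=2.12`, where this code backs `Dataset.from_spark`:

from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master('local[2]').getOrCreate()
df = spark.createDataFrame([('hello',), ('world',)], schema='text string')
ds = Dataset.from_spark(df)   # drives the SparkConfig/Spark builder defined above
print(ds.num_rows, ds[0])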
708
import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class A( unittest.TestCase ): '''simple docstring''' def a__ ( self : Optional[int] , A_ : Tuple , A_ : str , A_ : int ) -> Any: """simple docstring""" self.assertEqual(len(A_ ) , len(A_ ) ) for a, b in zip(A_ , A_ ): self.assertAlmostEqual(A_ , A_ , delta=A_ ) def a__ ( self : int ) -> str: """simple docstring""" lowerCamelCase_ = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(A_ ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 ) def a__ ( self : List[Any] ) -> List[Any]: """simple docstring""" lowerCamelCase_ = None ops.enable_eager_execution_internal() lowerCamelCase_ = tf.config.list_physical_devices('CPU' ) if len(A_ ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) lowerCamelCase_ = tf.config.list_logical_devices(device_type='CPU' ) lowerCamelCase_ = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): lowerCamelCase_ = GradientAccumulator() lowerCamelCase_ = tf.Variable([4.0, 3.0] ) lowerCamelCase_ , lowerCamelCase_ = create_optimizer(5E-5 , 10 , 5 ) lowerCamelCase_ = tf.Variable([0.0, 0.0] , trainable=A_ ) def accumulate_on_replica(A_ : Any ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(A_ : List[Any] , A_ : Tuple ): with strategy.scope(): lowerCamelCase_ = strategy.experimental_local_results(A_ ) local_variables[0].assign(A_ ) local_variables[1].assign(A_ ) strategy.run(A_ , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(A_ ) def _check_local_values(A_ : List[Any] , A_ : str ): lowerCamelCase_ = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , A_ , tol=1E-2 ) self.assertListAlmostEqual(values[1].value() , A_ , tol=1E-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
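A sketch of the accumulate-then-apply pattern the first test checks, using the same `GradientAccumulator`/`create_optimizer` utilities the test imports (TensorFlow required; the gradient values are illustrative):

import tensorflow as tf
from transformers import GradientAccumulator, create_optimizer

accumulator = GradientAccumulator()
optimizer, _ = create_optimizer(5e-5, num_train_steps=10, num_warmup_steps=2)
variable = tf.Variable([1.0, 2.0])

for grad in ([0.5, 0.5], [1.5, -0.5]):     # two micro-batches worth of gradients
    accumulator([tf.constant(grad)])
optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))
accumulator.reset()                        # start a fresh accumulation window
print(variable.numpy())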
651
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCamelCase : int = { '''configuration_pix2struct''': [ '''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Pix2StructConfig''', '''Pix2StructTextConfig''', '''Pix2StructVisionConfig''', ], '''processing_pix2struct''': ['''Pix2StructProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Optional[int] = ['''Pix2StructImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : List[Any] = [ '''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Pix2StructPreTrainedModel''', '''Pix2StructForConditionalGeneration''', '''Pix2StructVisionModel''', '''Pix2StructTextModel''', ] if TYPE_CHECKING: from .configuration_pixastruct import ( PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, PixaStructConfig, PixaStructTextConfig, PixaStructVisionConfig, ) from .processing_pixastruct import PixaStructProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_pixastruct import PixaStructImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pixastruct import ( PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, PixaStructForConditionalGeneration, PixaStructPreTrainedModel, PixaStructTextModel, PixaStructVisionModel, ) else: import sys lowerCamelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
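The file above only wires up a lazy import table. A simplified stand-in for the `_LazyModule` mechanism it relies on (illustrative, not the actual implementation): attribute access triggers the submodule import on first use.

import importlib
import types
from typing import Dict, List

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: Dict[str, List[str]]):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, item: str):
        for submodule, names in self._import_structure.items():
            if item in names:
                module = importlib.import_module(f'{self.__name__}.{submodule}')
                value = getattr(module, item)
                setattr(self, item, value)   # cache so later lookups are direct
                return value
        raise AttributeError(f'module {self.__name__!r} has no attribute {item!r}')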
709
import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs lowerCamelCase : str = imread(r"digital_image_processing/image_data/lena_small.jpg") lowerCamelCase : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY) def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = cn.convert_to_negative(lowercase ) # assert negative_img array for at least one True assert negative_img.any() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img: # Work around assertion for response assert str(cc.change_contrast(lowercase , 1_10 ) ).startswith( '<PIL.Image.Image image mode=RGB size=100x100 at' ) def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = imread('digital_image_processing/image_data/lena_small.jpg' , 0 ) # assert ambiguous array for all == True assert canny_img.all() lowerCamelCase_ = canny.canny(lowercase ) # assert canny array for at least one True assert canny_array.any() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' assert gg.gaussian_filter(lowercase , 5 , sigma=0.9 ).all() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) lowerCamelCase_ = conv.img_convolve(lowercase , lowercase ).astype(lowercase ) assert res.any() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' assert med.median_filter(lowercase , 3 ).any() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ , lowerCamelCase_ = sob.sobel_filter(lowercase ) assert grad.any() and theta.any() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = sp.make_sepia(lowercase , 20 ) assert sepia.all() def _SCREAMING_SNAKE_CASE ( lowercase : str = "digital_image_processing/image_data/lena_small.jpg" ): '''simple docstring''' lowerCamelCase_ = bs.Burkes(imread(lowercase , 1 ) , 1_20 ) burkes.process() assert burkes.output_img.any() def _SCREAMING_SNAKE_CASE ( lowercase : str = "digital_image_processing/image_data/lena_small.jpg" , ): '''simple docstring''' lowerCamelCase_ = rs.NearestNeighbour(imread(lowercase , 1 ) , 4_00 , 2_00 ) nn.process() assert nn.output.any() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = 'digital_image_processing/image_data/lena.jpg' # Reading the image and converting it to grayscale. 
lowerCamelCase_ = imread(lowercase , 0 ) # Test for get_neighbors_pixel function() return not None lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = image[x_coordinate][y_coordinate] lowerCamelCase_ = lbp.get_neighbors_pixel( lowercase , lowercase , lowercase , lowercase ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image lowerCamelCase_ = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): lowerCamelCase_ = lbp.local_binary_value(lowercase , lowercase , lowercase ) assert lbp_image.any()
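The tests above target a local `digital_image_processing` package. A self-contained sketch of the same kind of checks using only OpenCV and NumPy (the random array stands in for `lena_small.jpg`):

import numpy as np
from cv2 import COLOR_BGR2GRAY, GaussianBlur, cvtColor

img = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)   # stand-in image
gray = cvtColor(img, COLOR_BGR2GRAY)
blurred = GaussianBlur(gray, (5, 5), 0.9)                  # Gaussian filter analogue
negative = 255 - img                                       # convert-to-negative analogue
assert blurred.shape == gray.shape and negative.any()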
651
0
def _SCREAMING_SNAKE_CASE ( lowercase : int = 10_00 ): '''simple docstring''' lowerCamelCase_ , lowerCamelCase_ = 1, 1 lowerCamelCase_ = 2 while True: lowerCamelCase_ = 0 lowerCamelCase_ = fa + fa lowerCamelCase_ , lowerCamelCase_ = fa, f index += 1 for _ in str(f ): i += 1 if i == n: break return index if __name__ == "__main__": print(solution(int(str(input()).strip())))
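The snippet is Project Euler 25: the index of the first Fibonacci number with `n` digits. A standalone check of the same logic (written out plainly, since the dump above mangles the local names):

def first_fib_index_with_digits(n: int) -> int:
    f1, f2, index = 1, 1, 2
    while len(str(f2)) < n:
        f1, f2 = f2, f1 + f2
        index += 1
    return index

assert first_fib_index_with_digits(3) == 12       # F(12) = 144 is the first 3-digit term
assert first_fib_index_with_digits(1000) == 4782  # the published Euler 25 answer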
710
class A: '''simple docstring''' def __init__( self : Dict ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = {} def a__ ( self : Union[str, Any] , A_ : List[Any] ) -> int: """simple docstring""" if vertex not in self.adjacency: lowerCamelCase_ = {} self.num_vertices += 1 def a__ ( self : int , A_ : int , A_ : Optional[Any] , A_ : List[str] ) -> Tuple: """simple docstring""" self.add_vertex(A_ ) self.add_vertex(A_ ) if head == tail: return lowerCamelCase_ = weight lowerCamelCase_ = weight def a__ ( self : Optional[int] ) -> List[str]: """simple docstring""" lowerCamelCase_ = self.get_edges() for edge in edges: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge edges.remove((tail, head, weight) ) for i in range(len(A_ ) ): lowerCamelCase_ = list(edges[i] ) edges.sort(key=lambda A_ : e[2] ) for i in range(len(A_ ) - 1 ): if edges[i][2] >= edges[i + 1][2]: lowerCamelCase_ = edges[i][2] + 1 for edge in edges: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge lowerCamelCase_ = weight lowerCamelCase_ = weight def __str__( self : str ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = '' for tail in self.adjacency: for head in self.adjacency[tail]: lowerCamelCase_ = self.adjacency[head][tail] string += f"""{head} -> {tail} == {weight}\n""" return string.rstrip('\n' ) def a__ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = [] for tail in self.adjacency: for head in self.adjacency[tail]: output.append((tail, head, self.adjacency[head][tail]) ) return output def a__ ( self : List[str] ) -> int: """simple docstring""" return self.adjacency.keys() @staticmethod def a__ ( A_ : Optional[Any]=None , A_ : List[str]=None ) -> List[str]: """simple docstring""" lowerCamelCase_ = Graph() if vertices is None: lowerCamelCase_ = [] if edges is None: lowerCamelCase_ = [] for vertex in vertices: g.add_vertex(A_ ) for edge in edges: g.add_edge(*A_ ) return g class A: '''simple docstring''' def __init__( self : Optional[int] ) -> int: """simple docstring""" lowerCamelCase_ = {} lowerCamelCase_ = {} def __len__( self : Any ) -> List[str]: """simple docstring""" return len(self.parent ) def a__ ( self : List[str] , A_ : Any ) -> Dict: """simple docstring""" if item in self.parent: return self.find(A_ ) lowerCamelCase_ = item lowerCamelCase_ = 0 return item def a__ ( self : List[str] , A_ : Tuple ) -> Optional[int]: """simple docstring""" if item not in self.parent: return self.make_set(A_ ) if item != self.parent[item]: lowerCamelCase_ = self.find(self.parent[item] ) return self.parent[item] def a__ ( self : Any , A_ : int , A_ : Tuple ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = self.find(A_ ) lowerCamelCase_ = self.find(A_ ) if roota == roota: return roota if self.rank[roota] > self.rank[roota]: lowerCamelCase_ = roota return roota if self.rank[roota] < self.rank[roota]: lowerCamelCase_ = roota return roota if self.rank[roota] == self.rank[roota]: self.rank[roota] += 1 lowerCamelCase_ = roota return roota return None @staticmethod def a__ ( A_ : int ) -> Tuple: """simple docstring""" lowerCamelCase_ = graph.num_vertices lowerCamelCase_ = Graph.UnionFind() lowerCamelCase_ = [] while num_components > 1: lowerCamelCase_ = {} for vertex in graph.get_vertices(): lowerCamelCase_ = -1 lowerCamelCase_ = graph.get_edges() for edge in edges: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge edges.remove((tail, head, weight) ) for edge in edges: lowerCamelCase_ , lowerCamelCase_ , 
lowerCamelCase_ = edge lowerCamelCase_ = union_find.find(A_ ) lowerCamelCase_ = union_find.find(A_ ) if seta != seta: if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: lowerCamelCase_ = [head, tail, weight] if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: lowerCamelCase_ = [head, tail, weight] for vertex in cheap_edge: if cheap_edge[vertex] != -1: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = cheap_edge[vertex] if union_find.find(A_ ) != union_find.find(A_ ): union_find.union(A_ , A_ ) mst_edges.append(cheap_edge[vertex] ) lowerCamelCase_ = num_components - 1 lowerCamelCase_ = Graph.build(edges=A_ ) return mst
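Hedged usage sketch for the Boruvka-style MST above, assuming the class compiles under its intended name `Graph`; the static MST routine's real name is mangled in the dump, so `boruvka_mst` below is a hypothetical stand-in for it.

g = Graph.build(
    vertices=[1, 2, 3, 4],
    edges=[(1, 2, 1), (2, 3, 2), (3, 4, 1), (4, 1, 3)],
)
mst = Graph.boruvka_mst(g)   # hypothetical name for the static method above
print(mst)                   # a spanning tree connecting all four vertices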
651
0
from collections.abc import Sequence from queue import Queue class A: '''simple docstring''' def __init__( self : Tuple , A_ : Any , A_ : Tuple , A_ : List[str] , A_ : Dict=None , A_ : Optional[int]=None ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = start lowerCamelCase_ = end lowerCamelCase_ = val lowerCamelCase_ = (start + end) // 2 lowerCamelCase_ = left lowerCamelCase_ = right def __repr__( self : List[Any] ) -> Any: """simple docstring""" return f"""SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})""" class A: '''simple docstring''' def __init__( self : List[str] , A_ : str , A_ : List[str] ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = collection lowerCamelCase_ = function if self.collection: lowerCamelCase_ = self._build_tree(0 , len(UpperCAmelCase_ ) - 1 ) def a__ ( self : Tuple , A_ : int , A_ : str ) -> Dict: """simple docstring""" self._update_tree(self.root , UpperCAmelCase_ , UpperCAmelCase_ ) def a__ ( self : int , A_ : Optional[int] , A_ : Dict ) -> str: """simple docstring""" return self._query_range(self.root , UpperCAmelCase_ , UpperCAmelCase_ ) def a__ ( self : Union[str, Any] , A_ : Any , A_ : Optional[Any] ) -> List[str]: """simple docstring""" if start == end: return SegmentTreeNode(UpperCAmelCase_ , UpperCAmelCase_ , self.collection[start] ) lowerCamelCase_ = (start + end) // 2 lowerCamelCase_ = self._build_tree(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCamelCase_ = self._build_tree(mid + 1 , UpperCAmelCase_ ) return SegmentTreeNode(UpperCAmelCase_ , UpperCAmelCase_ , self.fn(left.val , right.val ) , UpperCAmelCase_ , UpperCAmelCase_ ) def a__ ( self : Optional[int] , A_ : Optional[int] , A_ : str , A_ : List[Any] ) -> Tuple: """simple docstring""" if node.start == i and node.end == i: lowerCamelCase_ = val return if i <= node.mid: self._update_tree(node.left , UpperCAmelCase_ , UpperCAmelCase_ ) else: self._update_tree(node.right , UpperCAmelCase_ , UpperCAmelCase_ ) lowerCamelCase_ = self.fn(node.left.val , node.right.val ) def a__ ( self : Tuple , A_ : Optional[Any] , A_ : Union[str, Any] , A_ : Union[str, Any] ) -> List[str]: """simple docstring""" if node.start == i and node.end == j: return node.val if i <= node.mid: if j <= node.mid: # range in left child tree return self._query_range(node.left , UpperCAmelCase_ , UpperCAmelCase_ ) else: # range in left child tree and right child tree return self.fn( self._query_range(node.left , UpperCAmelCase_ , node.mid ) , self._query_range(node.right , node.mid + 1 , UpperCAmelCase_ ) , ) else: # range in right child tree return self._query_range(node.right , UpperCAmelCase_ , UpperCAmelCase_ ) def a__ ( self : Dict ) -> Optional[Any]: """simple docstring""" if self.root is not None: lowerCamelCase_ = Queue() queue.put(self.root ) while not queue.empty(): lowerCamelCase_ = queue.get() yield node if node.left is not None: queue.put(node.left ) if node.right is not None: queue.put(node.right ) if __name__ == "__main__": import operator for fn in [operator.add, max, min]: print("*" * 50) lowerCamelCase : Optional[Any] = SegmentTree([2, 1, 5, 3, 4], fn) for node in arr.traverse(): print(node) print() arr.update(1, 5) for node in arr.traverse(): print(node) print() print(arr.query_range(3, 4)) # 7 print(arr.query_range(2, 2)) # 5 print(arr.query_range(1, 3)) # 13 print()
711
def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = 0 for i in range(1 , 10_01 ): total += i**i return str(total )[-10:] if __name__ == "__main__": print(solution())
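This is Project Euler 48 (the last ten digits of 1^1 + 2^2 + ... + 1000^1000). A quick worked check of the same idea on the first ten terms:

total = sum(i**i for i in range(1, 11))
assert total == 10_405_071_317           # 1^1 + 2^2 + ... + 10^10
assert str(total)[-10:] == '0405071317'  # the "last ten digits" slice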
651
0
import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' raise RuntimeError('CUDA out of memory.' ) class A( nn.Module ): '''simple docstring''' def __init__( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" super().__init__() lowerCamelCase_ = nn.Linear(3 , 4 ) lowerCamelCase_ = nn.BatchNormad(4 ) lowerCamelCase_ = nn.Linear(4 , 5 ) def a__ ( self : str , A_ : List[str] ) -> str: """simple docstring""" return self.lineara(self.batchnorm(self.lineara(UpperCamelCase__ ) ) ) class A( unittest.TestCase ): '''simple docstring''' def a__ ( self : List[Any] ) -> Tuple: """simple docstring""" lowerCamelCase_ = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(A_ : Any ): nonlocal batch_sizes batch_sizes.append(UpperCamelCase__ ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(UpperCamelCase__ , [128, 64, 32, 16, 8] ) def a__ ( self : Optional[int] ) -> Tuple: """simple docstring""" lowerCamelCase_ = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(A_ : Union[str, Any] , A_ : List[str] ): nonlocal batch_sizes batch_sizes.append(UpperCamelCase__ ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga lowerCamelCase_ = mock_training_loop_function('hello' ) self.assertListEqual(UpperCamelCase__ , [128, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, 'hello'] ) def a__ ( self : List[Any] ) -> Dict: """simple docstring""" @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(A_ : str ): pass with self.assertRaises(UpperCamelCase__ ) as cm: mock_training_loop_function() self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] ) def a__ ( self : str ) -> Optional[Any]: """simple docstring""" @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(A_ : Dict ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(UpperCamelCase__ ) as cm: mock_training_loop_function() self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] ) def a__ ( self : List[str] ) -> Optional[int]: """simple docstring""" @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(A_ : int , A_ : int , A_ : int ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(UpperCamelCase__ ) as cm: mock_training_loop_function(128 , 'hello' , 'world' ) self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] ) self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] ) def a__ ( self : Union[str, Any] ) -> int: """simple docstring""" @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(A_ : List[str] ): raise ValueError('Oops, we had an error!' ) with self.assertRaises(UpperCamelCase__ ) as cm: mock_training_loop_function() self.assertIn('Oops, we had an error!' , cm.exception.args[0] ) @require_cuda def a__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" lowerCamelCase_ = torch.cuda.memory_allocated() lowerCamelCase_ = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , UpperCamelCase__ ) lowerCamelCase_ = release_memory(UpperCamelCase__ ) self.assertEqual(torch.cuda.memory_allocated() , UpperCamelCase__ )
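Minimal usage sketch for `find_executable_batch_size` outside a test harness: the decorated function is retried with a halved batch size each time it raises an out-of-memory-style error, exactly the behaviour the tests above pin down.

from accelerate.utils.memory import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    if batch_size > 16:                       # pretend anything above 16 OOMs
        raise RuntimeError('CUDA out of memory.')
    return batch_size

print(train())                                # 128 -> 64 -> 32 -> 16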
712
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) lowerCamelCase : Dict = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Optional[int] = ["ViTFeatureExtractor"] lowerCamelCase : Dict = ["ViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : int = [ "VIT_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTForImageClassification", "ViTForMaskedImageModeling", "ViTModel", "ViTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Tuple = [ "TFViTForImageClassification", "TFViTModel", "TFViTPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Dict = [ "FlaxViTForImageClassification", "FlaxViTModel", "FlaxViTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys lowerCamelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
651
0
from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
713
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets lowerCamelCase : int = datasets.logging.get_logger(__name__) lowerCamelCase : Optional[Any] = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n" lowerCamelCase : Tuple = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identify the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite were added by @andreasvc.\nParsing CoNLL files was developed by Leo Born.\n" lowerCamelCase : Optional[Any] = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n" def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Optional[int] , lowercase : Any=False , lowercase : Any=False , lowercase : Dict=True , lowercase : List[str]=False , lowercase : int="dummy_doc" ): '''simple docstring''' lowerCamelCase_ = {doc: key_lines} lowerCamelCase_ = {doc: sys_lines} lowerCamelCase_ = {} lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowercase , key_doc_lines[doc] , lowercase ) key_singletons_num += singletons_num if NP_only or min_span: lowerCamelCase_ = reader.set_annotated_parse_trees(lowercase , key_doc_lines[doc] , lowercase , lowercase ) lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowercase , sys_doc_lines[doc] , lowercase ) sys_singletons_num += singletons_num if NP_only or min_span: lowerCamelCase_ = reader.set_annotated_parse_trees(lowercase , key_doc_lines[doc] , lowercase , lowercase ) if remove_nested: lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowercase , lowercase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowercase , lowercase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters lowerCamelCase_ = reader.get_mention_assignments(lowercase , lowercase ) lowerCamelCase_ = reader.get_mention_assignments(lowercase , lowercase ) lowerCamelCase_ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( 'Number of removed nested coreferring mentions in the key ' f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" ) logger.info( 'Number of resulting singleton clusters in the key ' f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" ) if not keep_singletons: logger.info( f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """ 'files, respectively' ) return doc_coref_infos def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Tuple , 
lowercase : List[str] , lowercase : List[Any] , lowercase : List[Any] , lowercase : Tuple , lowercase : str ): '''simple docstring''' lowerCamelCase_ = get_coref_infos(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) lowerCamelCase_ = {} lowerCamelCase_ = 0 lowerCamelCase_ = 0 for name, metric in metrics: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = evaluator.evaluate_documents(lowercase , lowercase , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": fa} ) logger.info( name.ljust(10 ) , f"""Recall: {recall * 1_00:.2f}""" , f""" Precision: {precision * 1_00:.2f}""" , f""" F1: {fa * 1_00:.2f}""" , ) if conll_subparts_num == 3: lowerCamelCase_ = (conll / 3) * 1_00 logger.info(f"""CoNLL score: {conll:.2f}""" ) output_scores.update({'conll_score': conll} ) return output_scores def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] ): '''simple docstring''' lowerCamelCase_ = False for line in key_lines: if not line.startswith('#' ): if len(line.split() ) > 6: lowerCamelCase_ = line.split()[5] if not parse_col == "-": lowerCamelCase_ = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A( datasets.Metric ): '''simple docstring''' def a__ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' ) ), 'references': datasets.Sequence(datasets.Value('string' ) ), } ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[ 'https://github.com/ns-moosavi/coval', 'https://www.aclweb.org/anthology/P16-1060', 'http://www.conll.cemantix.org/2012/data.html', ] , ) def a__ ( self : List[str] , A_ : Optional[Any] , A_ : Optional[int] , A_ : int=True , A_ : str=False , A_ : int=False , A_ : Union[str, Any]=False ) -> List[Any]: """simple docstring""" lowerCamelCase_ = [ ('mentions', evaluator.mentions), ('muc', evaluator.muc), ('bcub', evaluator.b_cubed), ('ceafe', evaluator.ceafe), ('lea', evaluator.lea), ] if min_span: lowerCamelCase_ = util.check_gold_parse_annotation(A_ ) if not has_gold_parse: raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" lowerCamelCase_ = evaluate( key_lines=A_ , sys_lines=A_ , metrics=A_ , NP_only=A_ , remove_nested=A_ , keep_singletons=A_ , min_span=A_ , ) return score
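The averaged CoNLL score assembled above is simply the arithmetic mean of the MUC, B-cubed and CEAFe F1 values, scaled to a percentage (the `(conll / 3) * 100` step). A tiny worked check with made-up F1 values:

muc_f1, bcub_f1, ceafe_f1 = 0.70, 0.60, 0.50
conll_score = (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100
assert round(conll_score, 1) == 60.0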
651
0
from dataclasses import dataclass, field from typing import Optional @dataclass class A: '''simple docstring''' UpperCamelCase = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be trained.'''} ) UpperCamelCase = field( default='''./''' , metadata={'''help''': '''Save dir where model repo is cloned and models updates are saved to.'''} ) UpperCamelCase = field( default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path of training dataset.'''} ) UpperCamelCase = field( default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} ) UpperCamelCase = field(default=2 , metadata={'''help''': '''Batch size for training.'''} ) UpperCamelCase = field(default=2 , metadata={'''help''': '''Batch size for evaluation.'''} ) UpperCamelCase = field(default=0.1 , metadata={'''help''': '''Value of weight decay.'''} ) UpperCamelCase = field( default=1_0000 , metadata={'''help''': '''Size of buffer used to shuffle streaming dataset.'''} ) UpperCamelCase = field(default=2e-4 , metadata={'''help''': '''Learning rate for training.'''} ) UpperCamelCase = field(default='''cosine''' , metadata={'''help''': '''Learning rate scheduler type.'''} ) UpperCamelCase = field( default=750 , metadata={'''help''': '''Number of warmup steps in the learning rate schedule.'''} ) UpperCamelCase = field( default=16 , metadata={'''help''': '''Number of gradient accumulation steps.'''} ) UpperCamelCase = field( default=UpperCAmelCase__ , metadata={'''help''': '''Use gradient checkpointing to reduce memory footprint.'''} ) UpperCamelCase = field(default=5_0000 , metadata={'''help''': '''Maximum number of training steps.'''} ) UpperCamelCase = field( default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} ) UpperCamelCase = field(default=1024 , metadata={'''help''': '''Sequence lengths used for training.'''} ) UpperCamelCase = field(default=1 , metadata={'''help''': '''Training seed.'''} ) UpperCamelCase = field( default=1024 , metadata={'''help''': '''Interval to save checkpoints. Measured as number of forward passes not training steps.'''} , ) UpperCamelCase = field( default=UpperCAmelCase__ , metadata={'''help''': '''States path if the training should continue from a checkpoint folder.'''} ) UpperCamelCase = field(default=UpperCAmelCase__ , metadata={'''help''': '''If True the data is pretokenized.'''} ) @dataclass class A: '''simple docstring''' UpperCamelCase = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} ) UpperCamelCase = field( default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} ) UpperCamelCase = field(default=2 , metadata={'''help''': '''Batch size used for evaluation.'''} ) UpperCamelCase = field( default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. 
If -1 the full dataset is evaluated.'''} ) UpperCamelCase = field(default=1024 , metadata={'''help''': '''Length of sequences to be evaluated.'''} ) UpperCamelCase = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} ) @dataclass class A: '''simple docstring''' UpperCamelCase = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} ) UpperCamelCase = field(default=UpperCAmelCase__ , metadata={'''help''': '''Number of workers used for code evaluation.'''} ) UpperCamelCase = field( default=UpperCAmelCase__ , metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''} , ) UpperCamelCase = field( default=UpperCAmelCase__ , metadata={'''help''': '''Sample from the language model\'s output distribution.'''} ) UpperCamelCase = field(default=0.2 , metadata={'''help''': '''Sampling temperature used for generation.'''} ) UpperCamelCase = field(default=256 , metadata={'''help''': '''Maximum number of newly generated tokens.'''} ) UpperCamelCase = field(default=0 , metadata={'''help''': '''Top-k parameter used for generation.'''} ) UpperCamelCase = field(default=0.95 , metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} ) UpperCamelCase = field(default=10 , metadata={'''help''': '''Number of generations to run in parallel.'''} ) UpperCamelCase = field( default=200 , metadata={'''help''': '''Number of completions to generate for each sample.'''} ) UpperCamelCase = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} ) UpperCamelCase = field( default='''eval_results.json''' , metadata={'''help''': '''Random seed used for evaluation.'''} ) UpperCamelCase = field( default='''0''' , metadata={'''help''': '''Allow `code_eval` to execute Python code on machine'''} ) UpperCamelCase = field( default=-1 , metadata={ '''help''': ( '''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive''' ''' number corresponds to which GPU device id to run on.''' ) } , ) @dataclass class A: '''simple docstring''' UpperCamelCase = field( default=UpperCAmelCase__ , metadata={ '''help''': '''The number of CPU cores to use for parallel preprocessing. 
Default uses the maximum available.''' } , ) UpperCamelCase = field( default='''transformersbook/codeparrot''' , metadata={'''help''': '''Folder or name of dataset to process.'''} ) UpperCamelCase = field( default='''codeparrot-clean''' , metadata={'''help''': '''Folder to save processed processed dataset.'''} ) UpperCamelCase = field( default=10_0000 , metadata={'''help''': '''Number of files to save per JSON output file.'''} ) UpperCamelCase = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} ) UpperCamelCase = field( default=1000 , metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} ) UpperCamelCase = field( default=100 , metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} ) UpperCamelCase = field( default=0.25 , metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} ) UpperCamelCase = field( default=1.5 , metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} ) UpperCamelCase = field( default=0.7 , metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} ) UpperCamelCase = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} , ) UpperCamelCase = field( default=UpperCAmelCase__ , metadata={'''help''': '''If True, near-duplicate samples are removed.'''} ) UpperCamelCase = field( default=0.85 , metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} ) @dataclass class A: '''simple docstring''' UpperCamelCase = field( default='''gpt2''' , metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} ) UpperCamelCase = field( default='''transformersbook/codeparrot-train''' , metadata={'''help''': '''Dataset to train tokenizer on.'''} ) UpperCamelCase = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} ) UpperCamelCase = field(default=20_0000 , metadata={'''help''': '''Number of examples to train tokenizer on.'''} ) UpperCamelCase = field( default=3_2768 , metadata={'''help''': '''Number of examples to train the tokenizer on.'''} ) UpperCamelCase = field(default='''codeparrot''' , metadata={'''help''': '''Name of new tokenizer.'''} ) UpperCamelCase = field(default=UpperCAmelCase__ , metadata={'''help''': '''Push saved tokenizer to the hub.'''} ) @dataclass class A: '''simple docstring''' UpperCamelCase = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} ) UpperCamelCase = field( default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} ) UpperCamelCase = field( default='''tokenized-codeparrot-train''' , metadata={'''help''': '''Repo name of the pretokenized data.'''} ) UpperCamelCase = field(default=UpperCAmelCase__ , metadata={'''help''': '''Number of workers used for code evaluation.'''} ) @dataclass class A: '''simple docstring''' UpperCamelCase = field( default='''gpt2-large''' , metadata={'''help''': '''Configuration to use for model initialization.'''} ) UpperCamelCase = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Tokenizer attached to model.'''} ) UpperCamelCase = field(default='''codeparrot''' , metadata={'''help''': '''Name of the created model.'''} ) UpperCamelCase = field(default=UpperCAmelCase__ , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
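# Hedged usage sketch: argument dataclasses like those above are normally fed
# to transformers.HfArgumentParser so every field becomes a --flag on the
# command line. The tiny dataclass here is illustrative only, not one of the
# classes above.
from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class DemoArguments:
    learning_rate: float = field(default=2e-4, metadata={"help": "Learning rate for training."})
    max_train_steps: int = field(default=50000, metadata={"help": "Maximum number of training steps."})


demo_parser = HfArgumentParser(DemoArguments)
demo_args = demo_parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4"])[0]
print(demo_args.learning_rate)  # 0.0001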
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    # `task` is serialized even when left at its default value.
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
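# Quick check of the task template above: column_mapping tells the datasets
# library which input column should be exposed as "text".
template = LanguageModeling(text_column="content")
print(template.column_mapping)  # {'content': 'text'}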
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
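# What the _LazyModule indirection above buys, sketched: importing transformers
# stays cheap because the tokenizer submodules are only loaded on first
# attribute access (assumes the optional tokenizers dependency is installed).
import transformers.models.nllb as nllb_module

tokenizer_cls = nllb_module.NllbTokenizerFast  # the real import happens here
print(tokenizer_cls.__name__)  # NllbTokenizerFast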
from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = '''new-model''' if is_tf_available(): class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = NewModelConfig @require_tf class A( unittest.TestCase ): '''simple docstring''' @slow def a__ ( self : str ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = 'bert-base-cased' lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : List[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ = 'bert-base-cased' lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForPreTraining.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : Union[str, Any] ) -> str: """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ ) lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ , output_loading_info=A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : List[Any] ) -> Dict: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : int ) -> str: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = 
AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ ) lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ , output_loading_info=A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : Any ) -> List[Any]: """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ ) lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ , output_loading_info=A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : Tuple ) -> str: """simple docstring""" for model_name in ["bert-base-uncased"]: lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForSequenceClassification.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow def a__ ( self : List[Any] ) -> Any: """simple docstring""" for model_name in ["bert-base-uncased"]: lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForQuestionAnswering.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) @slow @require_tensorflow_probability def a__ ( self : int ) -> Union[str, Any]: """simple docstring""" for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: lowerCamelCase_ = AutoConfig.from_pretrained(A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(A_ ) lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained( A_ , output_loading_info=A_ ) self.assertIsNotNone(A_ ) self.assertIsInstance(A_ , A_ ) def a__ ( self : int ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ ) self.assertIsInstance(A_ , A_ ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 ) def a__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ ) self.assertIsInstance(A_ , A_ ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 ) def a__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = copy.deepcopy(model.config ) lowerCamelCase_ = ['FunnelBaseModel'] lowerCamelCase_ = TFAutoModel.from_config(A_ ) self.assertIsInstance(A_ , A_ ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(A_ ) lowerCamelCase_ = TFAutoModel.from_pretrained(A_ ) self.assertIsInstance(A_ , A_ ) def a__ ( self : Any ) -> Tuple: """simple docstring""" try: AutoConfig.register('new-model' , A_ ) lowerCamelCase_ = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(A_ ): auto_class.register(A_ , A_ ) 
auto_class.register(A_ , A_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(A_ ): auto_class.register(A_ , A_ ) # Now that the config is registered, it can be used as any other config with the auto-API lowerCamelCase_ = BertModelTester(self ).get_config() lowerCamelCase_ = NewModelConfig(**tiny_config.to_dict() ) lowerCamelCase_ = auto_class.from_config(A_ ) self.assertIsInstance(A_ , A_ ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(A_ ) lowerCamelCase_ = auto_class.from_pretrained(A_ ) self.assertIsInstance(A_ , A_ ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def a__ ( self : int ) -> int: """simple docstring""" with self.assertRaisesRegex( A_ , 'bert-base is not a local folder and is not a valid model identifier' ): lowerCamelCase_ = TFAutoModel.from_pretrained('bert-base' ) def a__ ( self : Any ) -> Dict: """simple docstring""" with self.assertRaisesRegex( A_ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ): lowerCamelCase_ = TFAutoModel.from_pretrained(A_ , revision='aaaaaa' ) def a__ ( self : str ) -> int: """simple docstring""" with self.assertRaisesRegex( A_ , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ): lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' ) def a__ ( self : Any ) -> List[Any]: """simple docstring""" with self.assertRaisesRegex(A_ , 'Use `from_pt=True` to load this model' ): lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' ) def a__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' ) with RequestCounter() as counter: lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' ) with RequestCounter() as counter: lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
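# The registration flow the tests above exercise, in isolation. Hedged sketch:
# NewModelConfig is defined in the snippet above, while TFNewModel is a
# stand-in name for the accompanying custom model class.
from transformers import CONFIG_MAPPING, AutoConfig, TFAutoModel

AutoConfig.register("new-model", NewModelConfig)  # config keyed by its model_type
TFAutoModel.register(NewModelConfig, TFNewModel)  # model keyed by its config class

# Mirror the test's finally-block cleanup so the global mappings stay pristine:
del CONFIG_MAPPING._extra_content["new-model"]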
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def _SCREAMING_SNAKE_CASE ( lowercase : List[Any] ): '''simple docstring''' lowerCamelCase_ = botoa.client('iam' ) lowerCamelCase_ = { "Version": "2012-10-17", "Statement": [ {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=_lowerCAmelCase , AssumeRolePolicyDocument=json.dumps(_lowerCAmelCase , indent=2 ) ) lowerCamelCase_ = { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "sagemaker:*", "ecr:GetDownloadUrlForLayer", "ecr:BatchGetImage", "ecr:BatchCheckLayerAvailability", "ecr:GetAuthorizationToken", "cloudwatch:PutMetricData", "cloudwatch:GetMetricData", "cloudwatch:GetMetricStatistics", "cloudwatch:ListMetrics", "logs:CreateLogGroup", "logs:CreateLogStream", "logs:DescribeLogStreams", "logs:PutLogEvents", "logs:GetLogEvents", "s3:CreateBucket", "s3:ListBucket", "s3:GetBucketLocation", "s3:GetObject", "s3:PutObject", ], "Resource": "*", } ], } # attach policy to role iam_client.put_role_policy( RoleName=_lowerCAmelCase , PolicyName=f"""{role_name}_policy_permission""" , PolicyDocument=json.dumps(_lowerCAmelCase , indent=2 ) , ) except iam_client.exceptions.EntityAlreadyExistsException: print(f"""role {role_name} already exists. Using existing one""" ) def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] ): '''simple docstring''' lowerCamelCase_ = botoa.client('iam' ) return iam_client.get_role(RoleName=_lowerCAmelCase )["Role"]["Arn"] def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = _ask_options( 'How do you want to authorize?' 
, ['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '] , _lowerCAmelCase , ) lowerCamelCase_ = None if credentials_configuration == 0: lowerCamelCase_ = _ask_field('Enter your AWS Profile name: [default] ' , default='default' ) lowerCamelCase_ = aws_profile else: print( 'Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,' '`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`' ) lowerCamelCase_ = _ask_field('AWS Access Key ID: ' ) lowerCamelCase_ = aws_access_key_id lowerCamelCase_ = _ask_field('AWS Secret Access Key: ' ) lowerCamelCase_ = aws_secret_access_key lowerCamelCase_ = _ask_field('Enter your AWS Region: [us-east-1]' , default='us-east-1' ) lowerCamelCase_ = aws_region lowerCamelCase_ = _ask_options( 'Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?' , ['Provide IAM Role name', 'Create new IAM role using credentials'] , _lowerCAmelCase , ) if role_management == 0: lowerCamelCase_ = _ask_field('Enter your IAM role name: ' ) else: lowerCamelCase_ = "accelerate_sagemaker_execution_role" print(f"""Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials""" ) _create_iam_role_for_sagemaker(_lowerCAmelCase ) lowerCamelCase_ = _ask_field( 'Do you want to use custom Docker image? [yes/NO]: ' , _convert_yes_no_to_bool , default=_lowerCAmelCase , error_message='Please enter yes or no.' , ) lowerCamelCase_ = None if is_custom_docker_image: lowerCamelCase_ = _ask_field('Enter your Docker image: ' , lambda lowercase : str(_lowerCAmelCase ).lower() ) lowerCamelCase_ = _ask_field( 'Do you want to provide SageMaker input channels with data locations? [yes/NO]: ' , _convert_yes_no_to_bool , default=_lowerCAmelCase , error_message='Please enter yes or no.' , ) lowerCamelCase_ = None if is_sagemaker_inputs_enabled: lowerCamelCase_ = _ask_field( 'Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ' , lambda lowercase : str(_lowerCAmelCase ).lower() , ) lowerCamelCase_ = _ask_field( 'Do you want to enable SageMaker metrics? [yes/NO]: ' , _convert_yes_no_to_bool , default=_lowerCAmelCase , error_message='Please enter yes or no.' , ) lowerCamelCase_ = None if is_sagemaker_metrics_enabled: lowerCamelCase_ = _ask_field( 'Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ' , lambda lowercase : str(_lowerCAmelCase ).lower() , ) lowerCamelCase_ = _ask_options( 'What is the distributed mode?' , ['No distributed training', 'Data parallelism'] , _convert_sagemaker_distributed_mode , ) lowerCamelCase_ = {} lowerCamelCase_ = _ask_field( 'Do you wish to optimize your script with torch dynamo?[yes/NO]:' , _convert_yes_no_to_bool , default=_lowerCAmelCase , error_message='Please enter yes or no.' , ) if use_dynamo: lowerCamelCase_ = "dynamo_" lowerCamelCase_ = _ask_options( 'Which dynamo backend would you like to use?' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , ) lowerCamelCase_ = _ask_field( 'Do you want to customize the defaults sent to torch.compile? [yes/NO]: ' , _convert_yes_no_to_bool , default=_lowerCAmelCase , error_message='Please enter yes or no.' , ) if use_custom_options: lowerCamelCase_ = _ask_options( 'Which mode do you want to use?' 
, _lowerCAmelCase , lambda lowercase : TORCH_DYNAMO_MODES[int(_lowerCAmelCase )] , default='default' , ) lowerCamelCase_ = _ask_field( 'Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ' , _convert_yes_no_to_bool , default=_lowerCAmelCase , error_message='Please enter yes or no.' , ) lowerCamelCase_ = _ask_field( 'Do you want to enable dynamic shape tracing? [yes/NO]: ' , _convert_yes_no_to_bool , default=_lowerCAmelCase , error_message='Please enter yes or no.' , ) lowerCamelCase_ = "Which EC2 instance type you want to use for your training?" if distributed_type != SageMakerDistributedType.NO: lowerCamelCase_ = _ask_options( _lowerCAmelCase , _lowerCAmelCase , lambda lowercase : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(_lowerCAmelCase )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" lowerCamelCase_ = _ask_field(_lowerCAmelCase , lambda lowercase : str(_lowerCAmelCase ).lower() , default='ml.p3.2xlarge' ) lowerCamelCase_ = 1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): lowerCamelCase_ = _ask_field( 'How many machines do you want use? [1]: ' , _lowerCAmelCase , default=1 , ) lowerCamelCase_ = _ask_options( 'Do you wish to use FP16 or BF16 (mixed precision)?' , ['no', 'fp16', 'bf16', 'fp8'] , _convert_mixed_precision , ) if use_dynamo and mixed_precision == "no": print( 'Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.' ) return SageMakerConfig( image_uri=_lowerCAmelCase , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=_lowerCAmelCase , use_cpu=_lowerCAmelCase , dynamo_config=_lowerCAmelCase , eca_instance_type=_lowerCAmelCase , profile=_lowerCAmelCase , region=_lowerCAmelCase , iam_role_name=_lowerCAmelCase , mixed_precision=_lowerCAmelCase , num_machines=_lowerCAmelCase , sagemaker_inputs_file=_lowerCAmelCase , sagemaker_metrics_file=_lowerCAmelCase , )
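# A hedged, non-interactive sketch of what the questionnaire above produces.
# Field names are read off the SageMakerConfig(...) call in the return
# statement ("eca" is taken to be an obfuscated "ec2"); the values are
# examples, and further required fields may need to be supplied.
sagemaker_config = SageMakerConfig(
    compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
    distributed_type=SageMakerDistributedType.NO,
    ec2_instance_type="ml.p3.2xlarge",
    iam_role_name="accelerate_sagemaker_execution_role",
    profile="default",
    region="us-east-1",
    num_machines=1,
    mixed_precision="no",
)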
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
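# Minimal usage sketch for the configuration class above, sized down for a
# quick test:
config = GPTNeoXJapaneseConfig(num_hidden_layers=4, hidden_size=256, num_attention_heads=4)
print(config.model_type)  # gpt_neox_japanese
print(config.use_cache)   # True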
'''simple docstring''' import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) lowerCamelCase : str = OrderedDict( [ ("audio-spectrogram-transformer", "ASTFeatureExtractor"), ("beit", "BeitFeatureExtractor"), ("chinese_clip", "ChineseCLIPFeatureExtractor"), ("clap", "ClapFeatureExtractor"), ("clip", "CLIPFeatureExtractor"), ("clipseg", "ViTFeatureExtractor"), ("conditional_detr", "ConditionalDetrFeatureExtractor"), ("convnext", "ConvNextFeatureExtractor"), ("cvt", "ConvNextFeatureExtractor"), ("data2vec-audio", "Wav2Vec2FeatureExtractor"), ("data2vec-vision", "BeitFeatureExtractor"), ("deformable_detr", "DeformableDetrFeatureExtractor"), ("deit", "DeiTFeatureExtractor"), ("detr", "DetrFeatureExtractor"), ("dinat", "ViTFeatureExtractor"), ("donut-swin", "DonutFeatureExtractor"), ("dpt", "DPTFeatureExtractor"), ("encodec", "EncodecFeatureExtractor"), ("flava", "FlavaFeatureExtractor"), ("glpn", "GLPNFeatureExtractor"), ("groupvit", "CLIPFeatureExtractor"), ("hubert", "Wav2Vec2FeatureExtractor"), ("imagegpt", "ImageGPTFeatureExtractor"), ("layoutlmv2", "LayoutLMv2FeatureExtractor"), ("layoutlmv3", "LayoutLMv3FeatureExtractor"), ("levit", "LevitFeatureExtractor"), ("maskformer", "MaskFormerFeatureExtractor"), ("mctct", "MCTCTFeatureExtractor"), ("mobilenet_v1", "MobileNetV1FeatureExtractor"), ("mobilenet_v2", "MobileNetV2FeatureExtractor"), ("mobilevit", "MobileViTFeatureExtractor"), ("nat", "ViTFeatureExtractor"), ("owlvit", "OwlViTFeatureExtractor"), ("perceiver", "PerceiverFeatureExtractor"), ("poolformer", "PoolFormerFeatureExtractor"), ("regnet", "ConvNextFeatureExtractor"), ("resnet", "ConvNextFeatureExtractor"), ("segformer", "SegformerFeatureExtractor"), ("sew", "Wav2Vec2FeatureExtractor"), ("sew-d", "Wav2Vec2FeatureExtractor"), ("speech_to_text", "Speech2TextFeatureExtractor"), ("speecht5", "SpeechT5FeatureExtractor"), ("swiftformer", "ViTFeatureExtractor"), ("swin", "ViTFeatureExtractor"), ("swinv2", "ViTFeatureExtractor"), ("table-transformer", "DetrFeatureExtractor"), ("timesformer", "VideoMAEFeatureExtractor"), ("tvlt", "TvltFeatureExtractor"), ("unispeech", "Wav2Vec2FeatureExtractor"), ("unispeech-sat", "Wav2Vec2FeatureExtractor"), ("van", "ConvNextFeatureExtractor"), ("videomae", "VideoMAEFeatureExtractor"), ("vilt", "ViltFeatureExtractor"), ("vit", "ViTFeatureExtractor"), ("vit_mae", "ViTFeatureExtractor"), ("vit_msn", "ViTFeatureExtractor"), ("wav2vec2", "Wav2Vec2FeatureExtractor"), ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"), ("wavlm", "Wav2Vec2FeatureExtractor"), ("whisper", "WhisperFeatureExtractor"), ("xclip", "CLIPFeatureExtractor"), ("yolos", "YolosFeatureExtractor"), ] ) lowerCamelCase : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def _SCREAMING_SNAKE_CASE ( lowercase : str ): '''simple docstring''' for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: 
lowerCamelCase_ = model_type_to_module_name(lowercase ) lowerCamelCase_ = importlib.import_module(f""".{module_name}""" , 'transformers.models' ) try: return getattr(lowercase , lowercase ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(lowercase , '__name__' , lowercase ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. lowerCamelCase_ = importlib.import_module('transformers' ) if hasattr(lowercase , lowercase ): return getattr(lowercase , lowercase ) return None def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, os.PathLike] , lowercase : Optional[Union[str, os.PathLike]] = None , lowercase : bool = False , lowercase : bool = False , lowercase : Optional[Dict[str, str]] = None , lowercase : Optional[Union[bool, str]] = None , lowercase : Optional[str] = None , lowercase : bool = False , **lowercase : Any , ): '''simple docstring''' lowerCamelCase_ = get_file_from_repo( lowercase , lowercase , cache_dir=lowercase , force_download=lowercase , resume_download=lowercase , proxies=lowercase , use_auth_token=lowercase , revision=lowercase , local_files_only=lowercase , ) if resolved_config_file is None: logger.info( 'Could not locate the feature extractor configuration file, will try to use the model config instead.' ) return {} with open(lowercase , encoding='utf-8' ) as reader: return json.load(lowercase ) class A: '''simple docstring''' def __init__( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" raise EnvironmentError( 'AutoFeatureExtractor is designed to be instantiated ' 'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.' ) @classmethod @replace_list_option_in_docstrings(A_ ) def a__ ( cls : Union[str, Any] , A_ : List[str] , **A_ : List[str] ) -> int: """simple docstring""" lowerCamelCase_ = kwargs.pop('config' , A_ ) lowerCamelCase_ = kwargs.pop('trust_remote_code' , A_ ) lowerCamelCase_ = True lowerCamelCase_ , lowerCamelCase_ = FeatureExtractionMixin.get_feature_extractor_dict(A_ , **A_ ) lowerCamelCase_ = config_dict.get('feature_extractor_type' , A_ ) lowerCamelCase_ = None if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ): lowerCamelCase_ = config_dict['auto_map']['AutoFeatureExtractor'] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. 
if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(A_ , A_ ): lowerCamelCase_ = AutoConfig.from_pretrained(A_ , **A_ ) # It could be in `config.feature_extractor_type`` lowerCamelCase_ = getattr(A_ , 'feature_extractor_type' , A_ ) if hasattr(A_ , 'auto_map' ) and "AutoFeatureExtractor" in config.auto_map: lowerCamelCase_ = config.auto_map['AutoFeatureExtractor'] if feature_extractor_class is not None: lowerCamelCase_ = feature_extractor_class_from_name(A_ ) lowerCamelCase_ = feature_extractor_auto_map is not None lowerCamelCase_ = feature_extractor_class is not None or type(A_ ) in FEATURE_EXTRACTOR_MAPPING lowerCamelCase_ = resolve_trust_remote_code( A_ , A_ , A_ , A_ ) if has_remote_code and trust_remote_code: lowerCamelCase_ = get_class_from_dynamic_module( A_ , A_ , **A_ ) lowerCamelCase_ = kwargs.pop('code_revision' , A_ ) if os.path.isdir(A_ ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(A_ , **A_ ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(A_ , **A_ ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. elif type(A_ ) in FEATURE_EXTRACTOR_MAPPING: lowerCamelCase_ = FEATURE_EXTRACTOR_MAPPING[type(A_ )] return feature_extractor_class.from_dict(A_ , **A_ ) raise ValueError( f"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """ f"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """ f"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" ) @staticmethod def a__ ( A_ : List[Any] , A_ : Optional[int] ) -> Any: """simple docstring""" FEATURE_EXTRACTOR_MAPPING.register(A_ , A_ )
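# The usual entry point for the mapping above; the checkpoint id is only an
# example, and the call fetches the preprocessor config from the Hub.
from transformers import AutoFeatureExtractor

extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(extractor).__name__)  # Wav2Vec2FeatureExtractor, per the wav2vec2 entry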
import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow lowerCamelCase : List[Any] = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ "text-classification", "language-modeling", "summarization", "token-classification", "question-answering", ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) lowerCamelCase : Tuple = logging.getLogger() def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument('-f' ) lowerCamelCase_ = parser.parse_args() return args.f def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Dict="eval" ): '''simple docstring''' lowerCamelCase_ = os.path.join(lowercase , f"""{split}_results.json""" ) if os.path.exists(lowercase ): with open(lowercase , 'r' ) as f: return json.load(lowercase ) raise ValueError(f"""can't find {path}""" ) lowerCamelCase : str = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class A( UpperCamelCase ): '''simple docstring''' def a__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --eval_steps=2 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(A_ , 'argv' , A_ ): run_flax_glue.main() lowerCamelCase_ = get_results(A_ ) self.assertGreaterEqual(result['eval_accuracy'] , 0.75 ) @slow def a__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_clm_flax.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --block_size 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(A_ , 'argv' , A_ ): run_clm_flax.main() lowerCamelCase_ = get_results(A_ ) self.assertLess(result['eval_perplexity'] , 100 ) @slow def a__ ( self : str ) -> Tuple: """simple docstring""" lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_summarization.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --test_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=8 --do_train --do_eval --do_predict --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --predict_with_generate """.split() with patch.object(A_ , 'argv' , A_ ): run_summarization_flax.main() lowerCamelCase_ = get_results(A_ , split='test' ) self.assertGreaterEqual(result['test_rouge1'] , 10 ) self.assertGreaterEqual(result['test_rouge2'] , 2 ) self.assertGreaterEqual(result['test_rougeL'] , 7 ) self.assertGreaterEqual(result['test_rougeLsum'] , 7 ) @slow def a__ ( 
self : Optional[int] ) -> str: """simple docstring""" lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_mlm.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --overwrite_output_dir --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --logging_steps 2 --eval_steps 2 --do_train --do_eval --num_train_epochs=1 """.split() with patch.object(A_ , 'argv' , A_ ): run_mlm_flax.main() lowerCamelCase_ = get_results(A_ ) self.assertLess(result['eval_perplexity'] , 42 ) @slow def a__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_t5_mlm_flax.py --model_name_or_path t5-small --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(A_ , 'argv' , A_ ): run_ta_mlm_flax.main() lowerCamelCase_ = get_results(A_ ) self.assertGreaterEqual(result['eval_accuracy'] , 0.42 ) @slow def a__ ( self : int ) -> Tuple: """simple docstring""" lowerCamelCase_ = 7 if get_gpu_count() > 1 else 2 lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_flax_ner.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --warmup_steps=2 --learning_rate=2e-4 --logging_steps 2 --eval_steps 2 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 """.split() with patch.object(A_ , 'argv' , A_ ): run_flax_ner.main() lowerCamelCase_ = get_results(A_ ) self.assertGreaterEqual(result['eval_accuracy'] , 0.75 ) self.assertGreaterEqual(result['eval_f1'] , 0.3 ) @slow def a__ ( self : str ) -> int: """simple docstring""" lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f""" run_qa.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=2 --do_train --do_eval --logging_steps 2 --eval_steps 2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(A_ , 'argv' , A_ ): run_qa.main() lowerCamelCase_ = get_results(A_ ) self.assertGreaterEqual(result['eval_f1'] , 30 ) self.assertGreaterEqual(result['eval_exact'] , 30 )
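# The harness pattern used throughout the tests above, reduced to its core:
# patch sys.argv so the example script's own argument parser sees a synthetic
# command line.
import sys
from unittest.mock import patch

testargs = "run_flax_glue.py --model_name_or_path distilbert-base-uncased --output_dir /tmp/out".split()
with patch.object(sys, "argv", testargs):
    print(sys.argv[0])  # run_flax_glue.py; inside the block, main() would parse these args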
from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging lowerCamelCase : Tuple = logging.get_logger(__name__) lowerCamelCase : List[str] = { "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json", "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json", "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json", "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json", "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json", "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json", } class A( UpperCAmelCase__ ): '''simple docstring''' UpperCamelCase = "bloom" UpperCamelCase = ["past_key_values"] UpperCamelCase = { "num_hidden_layers": "n_layer", "num_attention_heads": "n_head", } def __init__( self : Any , A_ : List[Any]=250880 , A_ : List[str]=64 , A_ : List[str]=2 , A_ : List[Any]=8 , A_ : Any=1E-5 , A_ : str=0.02 , A_ : List[Any]=True , A_ : str=1 , A_ : Tuple=2 , A_ : Any=False , A_ : Union[str, Any]=0.0 , A_ : Optional[Any]=0.0 , A_ : Optional[Any]=1 , A_ : List[str]=False , **A_ : Union[str, Any] , ) -> Tuple: """simple docstring""" lowerCamelCase_ = vocab_size # Backward compatibility with n_embed kwarg lowerCamelCase_ = kwargs.pop('n_embed' , lowerCamelCase__ ) lowerCamelCase_ = hidden_size if n_embed is None else n_embed lowerCamelCase_ = n_layer lowerCamelCase_ = n_head lowerCamelCase_ = layer_norm_epsilon lowerCamelCase_ = initializer_range lowerCamelCase_ = use_cache lowerCamelCase_ = pretraining_tp lowerCamelCase_ = apply_residual_connection_post_layernorm lowerCamelCase_ = hidden_dropout lowerCamelCase_ = attention_dropout lowerCamelCase_ = bos_token_id lowerCamelCase_ = eos_token_id lowerCamelCase_ = slow_but_exact super().__init__(bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ ) class A( UpperCAmelCase__ ): '''simple docstring''' UpperCamelCase = version.parse('''1.12''' ) def __init__( self : List[Any] , A_ : PretrainedConfig , A_ : str = "default" , A_ : List[PatchingSpec] = None , A_ : bool = False , ) -> int: """simple docstring""" super().__init__(lowerCamelCase__ , task=lowerCamelCase__ , patching_specs=lowerCamelCase__ , use_past=lowerCamelCase__ ) if not getattr(self._config , 'pad_token_id' , lowerCamelCase__ ): # TODO: how to do that better? lowerCamelCase_ = 0 @property def a__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" lowerCamelCase_ = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: # BLOOM stores values on dynamic axis 2. 
For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(lowerCamelCase__ , direction='inputs' , inverted_values_shape=lowerCamelCase__ ) lowerCamelCase_ = {0: "batch", 1: "past_sequence + sequence"} else: lowerCamelCase_ = {0: "batch", 1: "sequence"} return common_inputs @property def a__ ( self : Dict ) -> int: """simple docstring""" return self._config.n_layer @property def a__ ( self : Union[str, Any] ) -> int: """simple docstring""" return self._config.n_head @property def a__ ( self : Any ) -> float: """simple docstring""" return 1E-3 def a__ ( self : Union[str, Any] , A_ : "PreTrainedTokenizer" , A_ : int = -1 , A_ : int = -1 , A_ : bool = False , A_ : Optional["TensorType"] = None , ) -> Mapping[str, Any]: """simple docstring""" lowerCamelCase_ = super(lowerCamelCase__ , self ).generate_dummy_inputs( lowerCamelCase__ , batch_size=lowerCamelCase__ , seq_length=lowerCamelCase__ , is_pair=lowerCamelCase__ , framework=lowerCamelCase__ ) # We need to order the input in the way they appears in the forward() lowerCamelCase_ = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch lowerCamelCase_ = common_inputs["input_ids"].shape # Not using the same length for past_key_values lowerCamelCase_ = seqlen + 2 lowerCamelCase_ = self._config.hidden_size // self.num_attention_heads lowerCamelCase_ = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) lowerCamelCase_ = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) lowerCamelCase_ = [ (torch.zeros(lowerCamelCase__ ), torch.zeros(lowerCamelCase__ )) for _ in range(self.num_layers ) ] lowerCamelCase_ = common_inputs["attention_mask"] if self.use_past: lowerCamelCase_ = ordered_inputs["attention_mask"].dtype lowerCamelCase_ = torch.cat( [ordered_inputs['attention_mask'], torch.ones(lowerCamelCase__ , lowerCamelCase__ , dtype=lowerCamelCase__ )] , dim=1 ) return ordered_inputs @property def a__ ( self : List[Any] ) -> int: """simple docstring""" return 13
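# Shape sketch for the BLOOM past_key_values layout built above (illustrative
# sizes): keys carry head_dim before the sequence axis, values the reverse.
batch, num_heads, head_dim, past_len = 2, 32, 80, 9
past_key_shape = (batch * num_heads, head_dim, past_len)    # (64, 80, 9)
past_value_shape = (batch * num_heads, past_len, head_dim)  # (64, 9, 80)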
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation: the tree must hold exactly one coin per node overall.
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The number of nodes should be the same as the number of coins")

    # Main calculation: post-order walk accumulating moves and coin excess.
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
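# Worked example: the root holds all three coins, so one move pushes a coin to
# each empty child, for two moves total.
tree = TreeNode(3, TreeNode(0), TreeNode(0))
print(distribute_coins(tree))  # 2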
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    # Store every edge in both directions: the graph is undirected.
    adjacency = defaultdict(list)
    for node_a, node_b, cost in edges:
        adjacency[node_a].append([node_b, cost])
        adjacency[node_b].append([node_a, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        # Accept either orientation, since MST edges are undirected.
        assert edge in result or reverse in result
from manim import * class A( UpperCamelCase ): '''simple docstring''' def a__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" lowerCamelCase_ = Rectangle(height=0.5 , width=0.5 ) lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) lowerCamelCase_ = Rectangle(height=0.25 , width=0.25 ) lowerCamelCase_ = [mem.copy() for i in range(6 )] lowerCamelCase_ = [mem.copy() for i in range(6 )] lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = Text('CPU' , font_size=24 ) lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(A_ ) lowerCamelCase_ = [mem.copy() for i in range(4 )] lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = Text('GPU' , font_size=24 ) lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ ) gpu.move_to([-1, -1, 0] ) self.add(A_ ) lowerCamelCase_ = [mem.copy() for i in range(6 )] lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = Text('Model' , font_size=24 ) lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ ) model.move_to([3, -1.0, 0] ) self.add(A_ ) lowerCamelCase_ = [] lowerCamelCase_ = [] for i, rect in enumerate(A_ ): lowerCamelCase_ = fill.copy().set_fill(A_ , opacity=0.8 ) target.move_to(A_ ) model_arr.append(A_ ) lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(A_ ) self.add(*A_ , *A_ ) lowerCamelCase_ = [meta_mem.copy() for i in range(6 )] lowerCamelCase_ = [meta_mem.copy() for i in range(6 )] lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 ) lowerCamelCase_ = Text('Disk' , font_size=24 ) lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ ) disk.move_to([-4, -1.25, 0] ) self.add(A_ , A_ ) lowerCamelCase_ = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowerCamelCase_ = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(A_ , A_ ) lowerCamelCase_ = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(A_ ) lowerCamelCase_ = MarkupText( f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(A_ ) ) lowerCamelCase_ = Square(0.3 ) input.set_fill(A_ , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , A_ , buff=0.5 ) self.play(Write(A_ ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=A_ , buff=0.02 ) self.play(MoveToTarget(A_ ) ) self.play(FadeOut(A_ ) ) lowerCamelCase_ = Arrow(start=A_ , end=A_ , color=A_ , buff=0.5 ) a.next_to(model_arr[0].get_left() , A_ , buff=0.2 ) model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) lowerCamelCase_ = MarkupText( f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(A_ , run_time=3 ) ) lowerCamelCase_ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02} self.play( Write(A_ 
) , Circumscribe(model_arr[0] , color=A_ , **A_ ) , Circumscribe(model_cpu_arr[0] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) lowerCamelCase_ = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , A_ , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) lowerCamelCase_ = AnimationGroup( FadeOut(A_ , run_time=0.5 ) , MoveToTarget(A_ , run_time=0.5 ) , FadeIn(A_ , run_time=0.5 ) , lag_ratio=0.2 ) self.play(A_ ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: lowerCamelCase_ = 0.7 self.play( Circumscribe(model_arr[i] , **A_ ) , Circumscribe(cpu_left_col_base[i] , **A_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , Circumscribe(model_arr[i + 1] , color=A_ , **A_ ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=A_ , **A_ ) , Circumscribe(cpu_left_col_base[-1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) lowerCamelCase_ = a_c lowerCamelCase_ = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(A_ ) , FadeOut(A_ , run_time=0.5 ) , ) lowerCamelCase_ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(A_ , run_time=3 ) , MoveToTarget(A_ ) ) self.wait()
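# A hedged accelerate sketch of the mechanism the animation depicts: the
# weights stay on the CPU and forward hooks stream one layer at a time onto
# the GPU (requires a CUDA device at runtime).
import torch
from accelerate import cpu_offload

model = torch.nn.Sequential(*(torch.nn.Linear(512, 512) for _ in range(6)))
cpu_offload(model, execution_device=torch.device("cuda"))

output = model(torch.randn(1, 512, device="cuda"))  # each layer hops over on demand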
import os


def solution():
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        grid = []
        for _ in range(20):
            grid.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1 (down-right)
    for i in range(17):
        for j in range(17):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2 (down-left)
    for i in range(17):
        for j in range(3, 20):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp

    return maximum


if __name__ == "__main__":
    print(solution())
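# Why the down-left diagonal iterates j over range(3, 20): stepping down-left
# needs j - 3 >= 0, so column 3 is the smallest legal starting point.
i, j = 0, 3
cells = [(i + k, j - k) for k in range(4)]
print(cells)  # [(0, 3), (1, 2), (2, 1), (3, 0)]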
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def info_command_factory(args):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d: dict) -> str:
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
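# How the command above is reached in practice: the diffusers CLI registers it
# on a subparser, so `diffusers-cli env` ends up calling run(). A hedged
# programmatic sketch of that wiring:
from argparse import ArgumentParser

cli_parser = ArgumentParser("diffusers-cli")
commands_parser = cli_parser.add_subparsers()
EnvironmentCommand.register_subcommand(commands_parser)

cli_args = cli_parser.parse_args(["env"])
cli_args.func(cli_args).run()  # prints the environment table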
651
0
import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) lowerCamelCase : Dict = logging.getLogger(__name__) lowerCamelCase : Optional[int] = tf.data.AUTOTUNE def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = argparse.ArgumentParser(description='Train a masked language model on TPU.' ) parser.add_argument( '--pretrained_model_config' , type=_UpperCamelCase , default='roberta-base' , help='The model config to use. Note that we don\'t copy the model\'s weights, only the config!' , ) parser.add_argument( '--tokenizer' , type=_UpperCamelCase , default='unigram-tokenizer-wikitext' , help='The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.' , ) parser.add_argument( '--per_replica_batch_size' , type=_UpperCamelCase , default=8 , help='Batch size per TPU core.' , ) parser.add_argument( '--no_tpu' , action='store_true' , help='If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.' , ) parser.add_argument( '--tpu_name' , type=_UpperCamelCase , help='Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.' , default='local' , ) parser.add_argument( '--tpu_zone' , type=_UpperCamelCase , help='Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.' , ) parser.add_argument( '--gcp_project' , type=_UpperCamelCase , help='Google cloud project name. Only used for non-Colab TPU nodes.' ) parser.add_argument( '--bfloat16' , action='store_true' , help='Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.' , ) parser.add_argument( '--train_dataset' , type=_UpperCamelCase , help='Path to training dataset to load. If the path begins with `gs://`' ' then the dataset will be loaded from a Google Cloud Storage bucket.' , ) parser.add_argument( '--shuffle_buffer_size' , type=_UpperCamelCase , default=2**18 , help='Size of the shuffle buffer (in samples)' , ) parser.add_argument( '--eval_dataset' , type=_UpperCamelCase , help='Path to evaluation dataset to load. If the path begins with `gs://`' ' then the dataset will be loaded from a Google Cloud Storage bucket.' , ) parser.add_argument( '--num_epochs' , type=_UpperCamelCase , default=1 , help='Number of epochs to train for.' , ) parser.add_argument( '--learning_rate' , type=_UpperCamelCase , default=1e-4 , help='Learning rate to use for training.' , ) parser.add_argument( '--weight_decay_rate' , type=_UpperCamelCase , default=1e-3 , help='Weight decay rate to use for training.' , ) parser.add_argument( '--max_length' , type=_UpperCamelCase , default=5_12 , help='Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py' , ) parser.add_argument( '--mlm_probability' , type=_UpperCamelCase , default=0.15 , help='Fraction of tokens to mask during training.' , ) parser.add_argument('--output_dir' , type=_UpperCamelCase , required=_UpperCamelCase , help='Path to save model checkpoints to.' ) parser.add_argument('--hub_model_id' , type=_UpperCamelCase , help='Model ID to upload to on the Hugging Face Hub.' 
) lowerCamelCase_ = parser.parse_args() return args def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] ): '''simple docstring''' try: if args.tpu_name: lowerCamelCase_ = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: lowerCamelCase_ = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( 'Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or ' '--gcp_project. When running on a TPU VM, use --tpu_name local.' ) tf.config.experimental_connect_to_cluster(_UpperCamelCase ) tf.tpu.experimental.initialize_tpu_system(_UpperCamelCase ) return tpu def _SCREAMING_SNAKE_CASE ( lowercase : str ): '''simple docstring''' lowerCamelCase_ = 0 for file in file_list: lowerCamelCase_ = file.split('/' )[-1] lowerCamelCase_ = re.search(r'-\d+-(\d+)\.tfrecord' , _UpperCamelCase ).group(1 ) lowerCamelCase_ = int(_UpperCamelCase ) num_samples += sample_count return num_samples def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : int , lowercase : Dict , lowercase : Any , lowercase : Tuple , lowercase : int=None ): '''simple docstring''' lowerCamelCase_ = count_samples(_UpperCamelCase ) lowerCamelCase_ = tf.data.Dataset.from_tensor_slices(_UpperCamelCase ) if shuffle: lowerCamelCase_ = dataset.shuffle(len(_UpperCamelCase ) ) lowerCamelCase_ = tf.data.TFRecordDataset(_UpperCamelCase , num_parallel_reads=_UpperCamelCase ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here lowerCamelCase_ = dataset.apply(tf.data.experimental.assert_cardinality(_UpperCamelCase ) ) lowerCamelCase_ = dataset.map(_UpperCamelCase , num_parallel_calls=_UpperCamelCase ) if shuffle: assert shuffle_buffer_size is not None lowerCamelCase_ = dataset.shuffle(args.shuffle_buffer_size ) lowerCamelCase_ = dataset.batch(_UpperCamelCase , drop_remainder=_UpperCamelCase ) lowerCamelCase_ = dataset.map(_UpperCamelCase , num_parallel_calls=_UpperCamelCase ) lowerCamelCase_ = dataset.prefetch(_UpperCamelCase ) return dataset def _SCREAMING_SNAKE_CASE ( lowercase : str ): '''simple docstring''' if not args.no_tpu: lowerCamelCase_ = initialize_tpu(_UpperCamelCase ) lowerCamelCase_ = tf.distribute.TPUStrategy(_UpperCamelCase ) else: lowerCamelCase_ = tf.distribute.OneDeviceStrategy(device='/gpu:0' ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy('mixed_bfloat16' ) lowerCamelCase_ = AutoTokenizer.from_pretrained(args.tokenizer ) lowerCamelCase_ = AutoConfig.from_pretrained(args.pretrained_model_config ) lowerCamelCase_ = tokenizer.vocab_size lowerCamelCase_ = tf.io.gfile.glob(os.path.join(args.train_dataset , '*.tfrecord' ) ) if not training_records: raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" ) lowerCamelCase_ = tf.io.gfile.glob(os.path.join(args.eval_dataset , '*.tfrecord' ) ) if not eval_records: raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" ) lowerCamelCase_ = count_samples(_UpperCamelCase ) lowerCamelCase_ = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) lowerCamelCase_ = steps_per_epoch * args.num_epochs with strategy.scope(): lowerCamelCase_ = TFAutoModelForMaskedLM.from_config(_UpperCamelCase ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built lowerCamelCase_ , lowerCamelCase_ = create_optimizer( num_train_steps=_UpperCamelCase , num_warmup_steps=total_train_steps // 20 , 
init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=_UpperCamelCase , metrics=['accuracy'] ) def decode_fn(lowercase : List[str] ): lowerCamelCase_ = { 'input_ids': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), 'attention_mask': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(_UpperCamelCase , _UpperCamelCase ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. lowerCamelCase_ = DataCollatorForLanguageModeling( tokenizer=_UpperCamelCase , mlm_probability=args.mlm_probability , mlm=_UpperCamelCase , return_tensors='tf' ) def mask_with_collator(lowercase : Optional[Any] ): # TF really needs an isin() function lowerCamelCase_ = ( ~tf.cast(batch['attention_mask'] , tf.bool ) | (batch['input_ids'] == tokenizer.cls_token_id) | (batch['input_ids'] == tokenizer.sep_token_id) ) lowerCamelCase_ , lowerCamelCase_ = data_collator.tf_mask_tokens( batch['input_ids'] , vocab_size=len(_UpperCamelCase ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_UpperCamelCase , ) return batch lowerCamelCase_ = args.per_replica_batch_size * strategy.num_replicas_in_sync lowerCamelCase_ = prepare_dataset( _UpperCamelCase , decode_fn=_UpperCamelCase , mask_fn=_UpperCamelCase , batch_size=_UpperCamelCase , shuffle=_UpperCamelCase , shuffle_buffer_size=args.shuffle_buffer_size , ) lowerCamelCase_ = prepare_dataset( _UpperCamelCase , decode_fn=_UpperCamelCase , mask_fn=_UpperCamelCase , batch_size=_UpperCamelCase , shuffle=_UpperCamelCase , ) lowerCamelCase_ = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_UpperCamelCase ) ) model.fit( _UpperCamelCase , validation_data=_UpperCamelCase , epochs=args.num_epochs , callbacks=_UpperCamelCase , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": lowerCamelCase : Optional[Any] = parse_args() main(args)
721
from __future__ import annotations from fractions import Fraction def is_digit_cancelling( num : int , den : int ): '''Return True if num/den keeps its value after naively cancelling the shared digit.''' return ( num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den ) def fraction_list( digit_len : int ): '''Collect all non-trivial two-digit digit-cancelling fractions as "num/den" strings.''' solutions = [] den = 11 last_digit = int('1' + '0' * digit_len ) for num in range(den , last_digit ): while den <= 99: if (num != den) and (num % 10 == den // 10) and (den % 10 != 0): if is_digit_cancelling(num , den ): solutions.append(f"""{num}/{den}""" ) den += 1 num += 1 den = 10 return solutions def solution( digit_len : int = 2 ): '''Return the denominator, in lowest terms, of the product of the curious fractions (Project Euler problem 33).''' result = 1.0 for fraction in fraction_list(digit_len ): frac = Fraction(fraction ) result *= frac.denominator / frac.numerator return int(result ) if __name__ == "__main__": print(solution())
651
0
import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine" def get_user_input( ): '''Walk the user through the interactive prompts and return the resulting config object.''' compute_environment = _ask_options( 'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: config = get_sagemaker_input() else: config = get_cluster_input() return config def config_command_parser( subparsers=None ): '''Create the argument parser for `accelerate config`.''' if subparsers is not None: parser = subparsers.add_parser('config' , description=description ) else: parser = argparse.ArgumentParser('Accelerate config command' , description=description ) parser.add_argument( '--config_file' , default=None , help=( 'The path to use to store the config file. Will default to a file named default_config.yaml in the cache ' 'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ' 'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ' 'with \'huggingface\'.' ) , ) if subparsers is not None: parser.set_defaults(func=config_command ) return parser def config_command( args ): '''Run the prompts and save the resulting configuration file.''' config = get_user_input() if args.config_file is not None: config_file = args.config_file else: if not os.path.isdir(cache_dir ): os.makedirs(cache_dir ) config_file = default_yaml_config_file if config_file.endswith('.json' ): config.to_json_file(config_file ) else: config.to_yaml_file(config_file ) print(f"""accelerate configuration saved at {config_file}""" ) def main( ): parser = config_command_parser() args = parser.parse_args() config_command(args ) if __name__ == "__main__": main()
700
from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging lowerCamelCase : List[Any] = logging.get_logger(__name__) class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = ['''pixel_values'''] def __init__( self : List[Any] , A_ : bool = True , A_ : Dict[str, int] = None , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : bool = True , A_ : Dict[str, int] = None , A_ : bool = True , A_ : Union[int, float] = 1 / 255 , A_ : bool = True , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **A_ : Tuple , ) -> None: """simple docstring""" super().__init__(**A_ ) lowerCamelCase_ = size if size is not None else {'shortest_edge': 224} lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ ) lowerCamelCase_ = crop_size if crop_size is not None else {'height': 224, 'width': 224} lowerCamelCase_ = get_size_dict(A_ , param_name='crop_size' ) lowerCamelCase_ = do_resize lowerCamelCase_ = size lowerCamelCase_ = resample lowerCamelCase_ = do_center_crop lowerCamelCase_ = crop_size lowerCamelCase_ = do_rescale lowerCamelCase_ = rescale_factor lowerCamelCase_ = do_normalize lowerCamelCase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN lowerCamelCase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Dict[str, int] , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Tuple , ) -> np.ndarray: """simple docstring""" lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: lowerCamelCase_ = int((256 / 224) * size['shortest_edge'] ) lowerCamelCase_ = get_resize_output_image_size(A_ , size=A_ , default_to_square=A_ ) lowerCamelCase_ = {'height': output_size[0], 'width': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" ) return resize( A_ , size=(size_dict['height'], size_dict['width']) , resample=A_ , data_format=A_ , **A_ ) def a__ ( self : Any , A_ : np.ndarray , A_ : Dict[str, int] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Any , ) -> np.ndarray: """simple docstring""" lowerCamelCase_ = get_size_dict(A_ ) if "height" not in size or "width" not in size: raise ValueError(f"""Size dict must have keys 'height' and 'width'. 
Got {size.keys()}""" ) return center_crop(A_ , size=(size['height'], size['width']) , data_format=A_ , **A_ ) def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Union[int, float] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Optional[int] , ) -> np.ndarray: """simple docstring""" return rescale(A_ , scale=A_ , data_format=A_ , **A_ ) def a__ ( self : List[str] , A_ : np.ndarray , A_ : Union[float, List[float]] , A_ : Union[float, List[float]] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : str , ) -> np.ndarray: """simple docstring""" return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ ) def a__ ( self : Optional[int] , A_ : ImageInput , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : PILImageResampling = None , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : Optional[bool] = None , A_ : Optional[float] = None , A_ : Optional[bool] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[TensorType] = None , A_ : ChannelDimension = ChannelDimension.FIRST , **A_ : List[Any] , ) -> BatchFeature: """simple docstring""" lowerCamelCase_ = do_resize if do_resize is not None else self.do_resize lowerCamelCase_ = resample if resample is not None else self.resample lowerCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase_ = image_mean if image_mean is not None else self.image_mean lowerCamelCase_ = image_std if image_std is not None else self.image_std lowerCamelCase_ = size if size is not None else self.size lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ ) lowerCamelCase_ = crop_size if crop_size is not None else self.crop_size lowerCamelCase_ = get_size_dict(A_ , param_name='crop_size' ) lowerCamelCase_ = make_list_of_images(A_ ) if not valid_images(A_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. lowerCamelCase_ = [to_numpy_array(A_ ) for image in images] if do_resize: lowerCamelCase_ = [self.resize(A_ , A_ , A_ ) for image in images] if do_center_crop: lowerCamelCase_ = [self.center_crop(A_ , A_ ) for image in images] if do_rescale: lowerCamelCase_ = [self.rescale(A_ , A_ ) for image in images] if do_normalize: lowerCamelCase_ = [self.normalize(A_ , A_ , A_ ) for image in images] lowerCamelCase_ = [to_channel_dimension_format(A_ , A_ ) for image in images] lowerCamelCase_ = {'pixel_values': images} return BatchFeature(data=A_ , tensor_type=A_ )
651
0
import string import numpy def greatest_common_divisor( a : int , b : int ) -> int: '''Euclidean algorithm for the greatest common divisor.''' return b if a == 0 else greatest_common_divisor(b % a , a ) class HillCipher: '''Hill cipher over the 36-character alphabet A-Z0-9.''' key_string = string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) modulus = numpy.vectorize(lambda x : x % 36 ) to_int = numpy.vectorize(round ) def __init__( self , encrypt_key : numpy.ndarray ) -> None: """encrypt_key is an NxN matrix whose determinant must be coprime with 36.""" self.encrypt_key = self.modulus(encrypt_key ) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key self.break_key = encrypt_key.shape[0] def replace_letters( self , letter : str ) -> int: """Map a character of the alphabet to its index.""" return self.key_string.index(letter ) def replace_digits( self , num : int ) -> str: """Map an index back to its character in the alphabet.""" return self.key_string[round(num )] def check_determinant( self ) -> None: """Raise ValueError if the key determinant is not coprime with 36.""" det = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: det = det % len(self.key_string ) req_l = len(self.key_string ) if greatest_common_divisor(det , len(self.key_string ) ) != 1: msg = ( f"""determinant modular {req_l} of encryption key({det}) """ f"""is not co prime w.r.t {req_l}.\nTry another key.""" ) raise ValueError(msg ) def process_text( self , text : str ) -> str: """Keep only alphabet characters and pad to a multiple of the key order.""" chars = [char for char in text.upper() if char in self.key_string] last = chars[-1] while len(chars ) % self.break_key != 0: chars.append(last ) return "".join(chars ) def encrypt( self , text : str ) -> str: """Encrypt text block by block with the key matrix.""" text = self.process_text(text.upper() ) encrypted = '' for i in range(0 , len(text ) - self.break_key + 1 , self.break_key ): batch = text[i : i + self.break_key] vec = [self.replace_letters(char ) for char in batch] batch_vec = numpy.array([vec] ).T batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec ) ).T.tolist()[ 0 ] encrypted_batch = ''.join( self.replace_digits(num ) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def make_decrypt_key( self ) -> numpy.ndarray: """Build the modular inverse of the key matrix.""" det = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: det = det % len(self.key_string ) det_inv = None for i in range(len(self.key_string ) ): if (det * i) % len(self.key_string ) == 1: det_inv = i break inv_key = ( det_inv * numpy.linalg.det(self.encrypt_key ) * numpy.linalg.inv(self.encrypt_key ) ) return self.to_int(self.modulus(inv_key ) ) def decrypt( self , text : str ) -> str: """Decrypt text block by block with the inverse key matrix.""" decrypt_key = self.make_decrypt_key() text = self.process_text(text.upper() ) decrypted = '' for i in range(0 , len(text ) - self.break_key + 1 , self.break_key ): batch = text[i : i + self.break_key] vec = [self.replace_letters(char ) for char in batch] batch_vec = numpy.array([vec] ).T batch_decrypted = self.modulus(decrypt_key.dot(batch_vec ) ).T.tolist()[0] decrypted_batch = ''.join( self.replace_digits(num ) for num in batch_decrypted ) decrypted += decrypted_batch return decrypted def main( ): '''Interactive driver: read a key matrix and encrypt or decrypt user input.''' n = int(input('Enter the order of the encryption key: ' ) ) hill_matrix = [] print('Enter each row of the encryption key with space separated integers' ) for _ in range(n ): row = [int(x ) for x in input().split()] hill_matrix.append(row ) hc = HillCipher(numpy.array(hill_matrix ) ) print('Would you like to encrypt or decrypt some text? (1 or 2)' ) option = input('\n1. Encrypt\n2. Decrypt\n' ) if option == "1": text_e = input('What text would you like to encrypt?: ' ) print('Your encrypted text is:' ) print(hc.encrypt(text_e ) ) elif option == "2": text_d = input('What text would you like to decrypt?: ' ) print('Your decrypted text is:' ) print(hc.decrypt(text_d ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
701
import cv2 import numpy as np class HarrisCorner: '''Harris corner detector.''' def __init__( self , k : float , window_size : int ) -> None: """k is the Harris free parameter (typically 0.04 or 0.06); window_size is the side of the summation window.""" if k in (0.04, 0.06): self.k = k self.window_size = window_size else: raise ValueError('invalid k value' ) def __str__( self ) -> str: return str(self.k ) def detect( self , img_path : str ) -> tuple[cv2.Mat, list[list[int]]]: """Return the image with detected corners marked in red plus the list of [x, y, r] corners.""" img = cv2.imread(img_path , 0 ) h , w = img.shape corner_list = [] color_img = img.copy() color_img = cv2.cvtColor(color_img , cv2.COLOR_GRAY2RGB ) dy , dx = np.gradient(img ) ixx = dx**2 iyy = dy**2 ixy = dx * dy k = self.k offset = self.window_size // 2 for y in range(offset , h - offset ): for x in range(offset , w - offset ): wxx = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() wyy = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() wxy = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() det = (wxx * wyy) - (wxy**2) trace = wxx + wyy r = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 255 ) return color_img, corner_list if __name__ == "__main__": edge_detect = HarrisCorner(0.04, 3) color_img , _ = edge_detect.detect("path_to_image") cv2.imwrite("detect.png", color_img)
651
0
import inspect import warnings from typing import Any, Dict, Optional, Union from packaging import version def deprecate( *args , take_from: Optional[Union[Dict, Any]] = None , standard_warn=True , stacklevel=2 ): '''Warn about each (attribute, version_name, message) tuple and pop any deprecated kwargs from take_from.''' from .. import __version__ deprecated_kwargs = take_from values = () if not isinstance(args[0] , tuple ): args = (args,) for attribute, version_name, message in args: if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ): raise ValueError( f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'""" f""" version {__version__} is >= {version_name}""" ) warning = None if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs: values += (deprecated_kwargs.pop(attribute ),) warning = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}.""" elif hasattr(deprecated_kwargs , attribute ): values += (getattr(deprecated_kwargs , attribute ),) warning = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}.""" elif deprecated_kwargs is None: warning = f"""`{attribute}` is deprecated and will be removed in version {version_name}.""" if warning is not None: warning = warning + ' ' if standard_warn else '' warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel ) if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0: call_frame = inspect.getouterframes(inspect.currentframe() )[1] filename = call_frame.filename line_number = call_frame.lineno function = call_frame.function key , value = next(iter(deprecated_kwargs.items() ) ) raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" ) if len(values ) == 0: return elif len(values ) == 1: return values[0] return values
702
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCamelCase : str = logging.get_logger(__name__) lowerCamelCase : Optional[Any] = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } lowerCamelCase : int = { "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } lowerCamelCase : Tuple = {"facebook/blenderbot-3B": 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = ( list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) lowerCamelCase_ = bs[:] lowerCamelCase_ = 0 for b in range(2**8 ): if b not in bs: bs.append(lowercase ) cs.append(2**8 + n ) n += 1 lowerCamelCase_ = [chr(lowercase ) for n in cs] return dict(zip(lowercase , lowercase ) ) def _SCREAMING_SNAKE_CASE ( lowercase : int ): '''simple docstring''' lowerCamelCase_ = set() lowerCamelCase_ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase_ = char return pairs class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = VOCAB_FILES_NAMES UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : Optional[Any] , A_ : List[Any] , A_ : List[Any] , A_ : Union[str, Any]="replace" , A_ : Dict="<s>" , A_ : Optional[int]="</s>" , A_ : Optional[Any]="</s>" , A_ : Dict="<s>" , A_ : Dict="<unk>" , A_ : Any="<pad>" , A_ : Dict="<mask>" , A_ : Union[str, Any]=False , **A_ : List[str] , ) -> Tuple: """simple docstring""" lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token super().__init__( errors=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , **A_ , ) with open(A_ , encoding='utf-8' ) as vocab_handle: lowerCamelCase_ = json.load(A_ ) lowerCamelCase_ = {v: k for k, v in self.encoder.items()} lowerCamelCase_ = errors # how to handle errors in decoding lowerCamelCase_ = bytes_to_unicode() lowerCamelCase_ = {v: k for k, v in self.byte_encoder.items()} with open(A_ , encoding='utf-8' ) as merges_handle: lowerCamelCase_ = merges_handle.read().split('\n' )[1:-1] lowerCamelCase_ = [tuple(merge.split() ) for merge in bpe_merges] lowerCamelCase_ = dict(zip(A_ , range(len(A_ ) ) ) ) lowerCamelCase_ = {} lowerCamelCase_ = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCamelCase_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def a__ ( self : Optional[Any] ) -> Dict: """simple docstring""" return len(self.encoder ) def a__ ( self : List[Any] ) -> Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def a__ ( self : Tuple , A_ : Tuple ) -> Optional[Any]: """simple docstring""" if token in self.cache: return self.cache[token] lowerCamelCase_ = tuple(A_ ) lowerCamelCase_ = get_pairs(A_ ) if not pairs: return token while True: lowerCamelCase_ = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase_ , lowerCamelCase_ = bigram lowerCamelCase_ = [] lowerCamelCase_ = 0 while i < len(A_ ): try: lowerCamelCase_ = word.index(A_ , A_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCamelCase_ = j if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCamelCase_ = tuple(A_ ) lowerCamelCase_ = new_word if len(A_ ) == 1: break else: lowerCamelCase_ = get_pairs(A_ ) lowerCamelCase_ = ' '.join(A_ ) lowerCamelCase_ = word return word def a__ ( self : str , A_ : List[str] ) -> List[str]: """simple docstring""" lowerCamelCase_ = [] for token in re.findall(self.pat , A_ ): lowerCamelCase_ = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A_ ).split(' ' ) ) return bpe_tokens def a__ ( self : Tuple , A_ : str ) -> Optional[Any]: """simple docstring""" return self.encoder.get(A_ , self.encoder.get(self.unk_token ) ) def a__ ( self : Tuple , A_ : Dict ) -> List[Any]: """simple docstring""" return self.decoder.get(A_ ) def a__ ( self : Optional[int] , A_ : List[Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = ''.join(A_ ) lowerCamelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def a__ ( self : Tuple , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(A_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCamelCase_ = os.path.join( A_ , 
(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) lowerCamelCase_ = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(A_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' ) lowerCamelCase_ = 0 with open(A_ , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A_ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ' Please check that the tokenizer is not corrupted!' ) lowerCamelCase_ = token_index writer.write(' '.join(A_ ) + '\n' ) index += 1 return vocab_file, merge_file def a__ ( self : str , A_ : List[int] , A_ : Optional[List[int]] = None , A_ : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ ) if token_ids_a is None: return [1] + ([0] * len(A_ )) + [1] return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1] def a__ ( self : int , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowerCamelCase_ = [self.sep_token_id] lowerCamelCase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a__ ( self : str , A_ : Optional[Any] , A_ : Union[str, Any]=False , **A_ : List[str] ) -> List[Any]: """simple docstring""" lowerCamelCase_ = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(A_ ) > 0 and not text[0].isspace()): lowerCamelCase_ = ' ' + text return (text, kwargs) def a__ ( self : List[Any] , A_ : List[int] , A_ : Optional[List[int]] = None ) -> Dict: """simple docstring""" return token_ids_a + [self.eos_token_id] def a__ ( self : Optional[int] , A_ : "Conversation" ) -> List[int]: """simple docstring""" lowerCamelCase_ = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(A_ ) lowerCamelCase_ = ' '.join(A_ ) lowerCamelCase_ = self.encode(A_ ) if len(A_ ) > self.model_max_length: lowerCamelCase_ = input_ids[-self.model_max_length :] logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
651
0
from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging lowerCamelCase : List[Any] = logging.get_logger(__name__) class A( a__ ): '''simple docstring''' UpperCamelCase = ["""pixel_values"""] def __init__( self : List[Any] , A_ : List[Any] = True , A_ : int = None , A_ : Union[str, Any] = PILImageResampling.BICUBIC , A_ : Optional[Any] = True , A_ : List[Any] = None , A_ : List[str] = True , A_ : Optional[int] = 1 / 255 , A_ : Any = True , A_ : Union[str, Any] = IMAGENET_DEFAULT_MEAN , A_ : Tuple = IMAGENET_DEFAULT_STD , **A_ : Union[str, Any] , ) -> None: """simple docstring""" super().__init__(**lowercase__ ) lowerCamelCase_ = size if size is not None else {'''shortest_edge''': 224} lowerCamelCase_ = get_size_dict(lowercase__ , default_to_square=lowercase__ ) lowerCamelCase_ = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} lowerCamelCase_ = get_size_dict(lowercase__ , param_name='crop_size' ) lowerCamelCase_ = do_resize lowerCamelCase_ = size lowerCamelCase_ = resample lowerCamelCase_ = do_center_crop lowerCamelCase_ = crop_size lowerCamelCase_ = do_rescale lowerCamelCase_ = rescale_factor lowerCamelCase_ = do_normalize lowerCamelCase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN lowerCamelCase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD def a__ ( self : Tuple , A_ : Dict , A_ : List[str] , A_ : str = PILImageResampling.BICUBIC , A_ : str = None , **A_ : Union[str, Any] , ) -> np.ndarray: """simple docstring""" lowerCamelCase_ = get_size_dict(lowercase__ , default_to_square=lowercase__ ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: lowerCamelCase_ = int((256 / 224) * size['shortest_edge'] ) lowerCamelCase_ = get_resize_output_image_size(lowercase__ , size=lowercase__ , default_to_square=lowercase__ ) lowerCamelCase_ = {'''height''': output_size[0], '''width''': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f"""Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}""" ) return resize( lowercase__ , size=(size_dict['height'], size_dict['width']) , resample=lowercase__ , data_format=lowercase__ , **lowercase__ ) def a__ ( self : str , A_ : Any , A_ : Dict , A_ : str = None , **A_ : Optional[int] , ) -> np.ndarray: """simple docstring""" lowerCamelCase_ = get_size_dict(lowercase__ ) if "height" not in size or "width" not in size: raise ValueError(f"""Size dict must have keys \'height\' and \'width\'. 
Got {size.keys()}""" ) return center_crop(lowercase__ , size=(size['height'], size['width']) , data_format=lowercase__ , **lowercase__ ) def a__ ( self : Any , A_ : str , A_ : Dict , A_ : Tuple = None , **A_ : Dict , ) -> np.ndarray: """simple docstring""" return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ ) def a__ ( self : Optional[int] , A_ : Tuple , A_ : Tuple , A_ : Optional[Any] , A_ : Any = None , **A_ : List[str] , ) -> np.ndarray: """simple docstring""" return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ ) def a__ ( self : List[str] , A_ : Union[str, Any] , A_ : List[str] = None , A_ : Optional[Any] = None , A_ : Union[str, Any] = None , A_ : Dict = None , A_ : Optional[int] = None , A_ : Union[str, Any] = None , A_ : Tuple = None , A_ : Optional[int] = None , A_ : Any = None , A_ : Union[str, Any] = None , A_ : List[str] = None , A_ : Optional[int] = ChannelDimension.FIRST , **A_ : Optional[Any] , ) -> BatchFeature: """simple docstring""" lowerCamelCase_ = do_resize if do_resize is not None else self.do_resize lowerCamelCase_ = resample if resample is not None else self.resample lowerCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase_ = image_mean if image_mean is not None else self.image_mean lowerCamelCase_ = image_std if image_std is not None else self.image_std lowerCamelCase_ = size if size is not None else self.size lowerCamelCase_ = get_size_dict(lowercase__ , default_to_square=lowercase__ ) lowerCamelCase_ = crop_size if crop_size is not None else self.crop_size lowerCamelCase_ = get_size_dict(lowercase__ , param_name='crop_size' ) lowerCamelCase_ = make_list_of_images(lowercase__ ) if not valid_images(lowercase__ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. lowerCamelCase_ = [to_numpy_array(lowercase__ ) for image in images] if do_resize: lowerCamelCase_ = [self.resize(lowercase__ , lowercase__ , lowercase__ ) for image in images] if do_center_crop: lowerCamelCase_ = [self.center_crop(lowercase__ , lowercase__ ) for image in images] if do_rescale: lowerCamelCase_ = [self.rescale(lowercase__ , lowercase__ ) for image in images] if do_normalize: lowerCamelCase_ = [self.normalize(lowercase__ , lowercase__ , lowercase__ ) for image in images] lowerCamelCase_ = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images] lowerCamelCase_ = {'''pixel_values''': images} return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
703
lowerCamelCase : Dict = "Alexander Joslin" import operator as op from .stack import Stack def _SCREAMING_SNAKE_CASE ( lowercase : str ): '''simple docstring''' lowerCamelCase_ = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub} lowerCamelCase_ = Stack() lowerCamelCase_ = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(lowercase ) ) elif i in operators: # RULE 2 operator_stack.push(lowercase ) elif i == ")": # RULE 4 lowerCamelCase_ = operator_stack.peek() operator_stack.pop() lowerCamelCase_ = operand_stack.peek() operand_stack.pop() lowerCamelCase_ = operand_stack.peek() operand_stack.pop() lowerCamelCase_ = operators[opr](lowercase , lowercase ) operand_stack.push(lowercase ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": lowerCamelCase : Any = "(5 + ((4 * 2) * (2 + 3)))" # answer = 45 print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
651
0
import json import os import unittest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A( _UpperCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase = CLIPTokenizer UpperCamelCase = CLIPTokenizerFast UpperCamelCase = True UpperCamelCase = {} UpperCamelCase = False def a__ ( self : Tuple ) -> Tuple: """simple docstring""" super().setUp() # fmt: off lowerCamelCase_ = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on lowerCamelCase_ = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) ) lowerCamelCase_ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""] lowerCamelCase_ = {"""unk_token""": """<unk>"""} lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(lowercase__ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(lowercase__ ) ) def a__ ( self : str , **A_ : Tuple ) -> int: """simple docstring""" kwargs.update(self.special_tokens_map ) return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase__ ) def a__ ( self : str , **A_ : Any ) -> Any: """simple docstring""" kwargs.update(self.special_tokens_map ) return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase__ ) def a__ ( self : List[str] , A_ : Tuple ) -> int: """simple docstring""" lowerCamelCase_ = """lower newer""" lowerCamelCase_ = """lower newer""" return input_text, output_text def a__ ( self : Tuple ) -> Any: """simple docstring""" lowerCamelCase_ = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowerCamelCase_ = """lower newer""" lowerCamelCase_ = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""] lowerCamelCase_ = tokenizer.tokenize(lowercase__ ) self.assertListEqual(lowercase__ , lowercase__ ) lowerCamelCase_ = tokens + [tokenizer.unk_token] lowerCamelCase_ = [10, 2, 16, 9, 3, 2, 16, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ ) @require_ftfy def a__ ( self : Union[str, Any] ) -> str: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowerCamelCase_ = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ ) lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ ) lowerCamelCase_ = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d.""" lowerCamelCase_ = tokenizer_s.tokenize(lowercase__ ) lowerCamelCase_ = tokenizer_r.tokenize(lowercase__ ) self.assertListEqual(lowercase__ , lowercase__ ) # Test that the tokenization is identical on an example containing a character (Latin Small Letter A # with Tilde) encoded in 2 different ways lowerCamelCase_ = """xa\u0303y""" + """ """ + """x\xe3y""" lowerCamelCase_ = tokenizer_s.tokenize(lowercase__ ) lowerCamelCase_ = tokenizer_r.tokenize(lowercase__ ) 
self.assertListEqual(lowercase__ , lowercase__ ) # Test that the tokenization is identical on unicode of space type lowerCamelCase_ = [ """\u0009""", # (horizontal tab, '\t') """\u000B""", # (vertical tab) """\u000C""", # (form feed) """\u0020""", # (space, ' ') """\u200E""", # (left-to-right mark):w """\u200F""", # (right-to-left mark) ] for unicode_seq in spaces_unicodes: lowerCamelCase_ = tokenizer_s.tokenize(lowercase__ ) lowerCamelCase_ = tokenizer_r.tokenize(lowercase__ ) self.assertListEqual(lowercase__ , lowercase__ ) # Test that the tokenization is identical on unicode of line break type lowerCamelCase_ = [ """\u000A""", # (line feed, '\n') """\r\n""", # (carriage return and line feed, '\r\n') """\u000D""", # (carriage return, '\r') """\r""", # (carriage return, '\r') """\u000D""", # (carriage return, '\r') """\u2028""", # (line separator) """\u2029""", # (paragraph separator) # "\u0085", # (next line) ] # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a # space (and thus into an empty list). for unicode_seq in line_break_unicodes: lowerCamelCase_ = tokenizer_s.tokenize(lowercase__ ) lowerCamelCase_ = tokenizer_r.tokenize(lowercase__ ) self.assertListEqual(lowercase__ , lowercase__ ) def a__ ( self : Optional[int] ) -> List[str]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowerCamelCase_ = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` lowerCamelCase_ = f"""{text_of_1_token} {text_of_1_token}""" lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , ) lowerCamelCase_ = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , ) lowerCamelCase_ = f""" {text}""" lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , ) lowerCamelCase_ = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowercase__ ) + 1, 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , ) def a__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" with self.assertRaises(lowercase__ ) as context: self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' ) self.assertTrue( context.exception.args[0].startswith( 'The `backend_tokenizer` provided does not match the expected format.' ) ) @require_ftfy def a__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" super().test_tokenization_python_rust_equals() def a__ ( self : List[Any] ) -> Tuple: """simple docstring""" pass
704
def print_max_activities( start : list[int] , finish : list[int] ): '''Greedy activity selection: given start and finish times sorted by finish time, print a maximal set of non-overlapping activities.''' n = len(finish ) print('The following activities are selected:' ) # The first activity is always selected i = 0 print(i , end=',' ) # Consider rest of the activities for j in range(n ): # If this activity has start time greater than # or equal to the finish time of previously # selected activity, then select it if start[j] >= finish[i]: print(j , end=',' ) i = j if __name__ == "__main__": import doctest doctest.testmod() start = [1, 3, 0, 5, 8, 5] finish = [2, 4, 6, 7, 9, 9] print_max_activities(start, finish)
651
0
def solution( n : int = 1_00 ): '''Count the distinct terms of a**b for 2 <= a <= n and 2 <= b <= n (Project Euler problem 29).''' collect_powers = set() current_pow = 0 n = n + 1 # maximum limit for a in range(2 , n ): for b in range(2 , n ): current_pow = a**b # calculates the current power collect_powers.add(current_pow ) # adds the result to the set return len(collect_powers ) if __name__ == "__main__": print("Number of terms ", solution(int(str(input()).strip())))
705
import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A: '''simple docstring''' def __init__( self : Optional[Any] , A_ : Union[str, Any] , A_ : str=13 , A_ : List[Any]=32 , A_ : Tuple=2 , A_ : Dict=3 , A_ : Union[str, Any]=16 , A_ : List[str]=[32, 64, 128] , A_ : Optional[Any]=[1, 2, 1] , A_ : Tuple=[2, 2, 4] , A_ : Dict=2 , A_ : Optional[Any]=2.0 , A_ : List[str]=True , A_ : Dict=0.0 , A_ : List[str]=0.0 , A_ : Optional[int]=0.1 , A_ : str="gelu" , A_ : Optional[Any]=False , A_ : Any=True , A_ : Optional[Any]=0.02 , A_ : Dict=1E-5 , A_ : int=True , A_ : Optional[int]=None , A_ : List[str]=True , A_ : Tuple=10 , A_ : Any=8 , A_ : Dict=["stage1", "stage2"] , A_ : Optional[Any]=[1, 2] , ) -> List[str]: """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = image_size lowerCamelCase_ = patch_size lowerCamelCase_ = num_channels lowerCamelCase_ = embed_dim lowerCamelCase_ = hidden_sizes lowerCamelCase_ = depths lowerCamelCase_ = num_heads lowerCamelCase_ = window_size lowerCamelCase_ = mlp_ratio lowerCamelCase_ = qkv_bias lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = drop_path_rate lowerCamelCase_ = hidden_act lowerCamelCase_ = use_absolute_embeddings lowerCamelCase_ = patch_norm lowerCamelCase_ = layer_norm_eps lowerCamelCase_ = initializer_range lowerCamelCase_ = is_training lowerCamelCase_ = scope lowerCamelCase_ = use_labels lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = encoder_stride lowerCamelCase_ = out_features lowerCamelCase_ = out_indices def a__ ( self : List[str] ) -> List[Any]: """simple docstring""" lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = self.get_config() return config, pixel_values, labels def a__ ( self : List[Any] ) -> Any: """simple docstring""" return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , 
            out_features=self.out_features ,
            out_indices=self.out_indices ,
        )

    def a__ ( self : Union[str, Any] , A_ : Dict , A_ : int , A_ : Optional[int] ) -> List[str]:
        """simple docstring"""
        lowerCamelCase_ = FocalNetModel(config=A_ )
        model.to(A_ )
        model.eval()
        lowerCamelCase_ = model(A_ )

        lowerCamelCase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        lowerCamelCase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )

    def a__ ( self : Tuple , A_ : List[str] , A_ : Optional[int] , A_ : Optional[Any] ) -> Dict:
        """simple docstring"""
        lowerCamelCase_ = FocalNetBackbone(config=A_ )
        model.to(A_ )
        model.eval()
        lowerCamelCase_ = model(A_ )

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )

        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )

        # verify backbone works with out_features=None
        lowerCamelCase_ = None
        lowerCamelCase_ = FocalNetBackbone(config=A_ )
        model.to(A_ )
        model.eval()
        lowerCamelCase_ = model(A_ )

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )

        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    def a__ ( self : int , A_ : Optional[Any] , A_ : Optional[int] , A_ : Any ) -> Any:
        """simple docstring"""
        lowerCamelCase_ = FocalNetForMaskedImageModeling(config=A_ )
        model.to(A_ )
        model.eval()
        lowerCamelCase_ = model(A_ )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )

        # test greyscale images
        lowerCamelCase_ = 1
        lowerCamelCase_ = FocalNetForMaskedImageModeling(A_ )
        model.to(A_ )
        model.eval()

        lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowerCamelCase_ = model(A_ )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    def a__ ( self : Tuple , A_ : List[Any] , A_ : int , A_ : Dict ) -> Union[str, Any]:
        """simple docstring"""
        lowerCamelCase_ = self.type_sequence_label_size
        lowerCamelCase_ = FocalNetForImageClassification(A_ )
        model.to(A_ )
        model.eval()
        lowerCamelCase_ = model(A_ , labels=A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        lowerCamelCase_ = 1
        lowerCamelCase_ = FocalNetForImageClassification(A_ )
        model.to(A_ )
        model.eval()

        lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowerCamelCase_ = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def a__ ( self : int ) -> Optional[Any]:
        """simple docstring"""
        lowerCamelCase_ = self.prepare_config_and_inputs()
        lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
        lowerCamelCase_ = {'pixel_values': pixel_values}
        return config, inputs_dict


@require_torch
class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
    '''simple docstring'''
    UpperCamelCase = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    UpperCamelCase = (
        {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )

    UpperCamelCase = False
    UpperCamelCase = False
    UpperCamelCase = False
    UpperCamelCase = False
    UpperCamelCase = False

    def a__ ( self : List[Any] ) -> Dict:
        """simple docstring"""
        lowerCamelCase_ = FocalNetModelTester(self )
        lowerCamelCase_ = ConfigTester(self , config_class=A_ , embed_dim=37 , has_text_modality=A_ )

    def a__ ( self : Dict ) -> Union[str, Any]:
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def a__ ( self : Any ) -> Optional[int]:
        """simple docstring"""
        return

    def a__ ( self : Union[str, Any] ) -> Optional[Any]:
        """simple docstring"""
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A_ )

    def a__ ( self : Union[str, Any] ) -> List[str]:
        """simple docstring"""
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*A_ )

    def a__ ( self : Dict ) -> int:
        """simple docstring"""
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*A_ )

    def a__ ( self : List[str] ) -> Any:
        """simple docstring"""
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A_ )

    @unittest.skip(reason='FocalNet does not use inputs_embeds' )
    def a__ ( self : int ) -> int:
        """simple docstring"""
        pass

    @unittest.skip(reason='FocalNet does not use feedforward chunking' )
    def a__ ( self : Tuple ) -> List[str]:
        """simple docstring"""
        pass

    def a__ ( self : Union[str, Any] ) -> Dict:
        """simple docstring"""
        lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            lowerCamelCase_ = model_class(A_ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowerCamelCase_ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )

    def a__ ( self : Any ) -> Optional[int]:
        """simple docstring"""
        lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            lowerCamelCase_ = model_class(A_ )
            lowerCamelCase_ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase_ = [*signature.parameters.keys()]

            lowerCamelCase_ = ['pixel_values']
            self.assertListEqual(arg_names[:1] , A_ )

    def a__ ( self : int , A_ : List[Any] , A_ : int , A_ : Dict , A_ : Dict ) -> List[Any]:
        """simple docstring"""
        lowerCamelCase_ = model_class(A_ )
        model.to(A_ )
        model.eval()

        with torch.no_grad():
            lowerCamelCase_ = model(**self._prepare_for_class(A_ , A_ ) )

        lowerCamelCase_ = outputs.hidden_states

        lowerCamelCase_ = getattr(
            self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(A_ ) , A_ )

        # FocalNet has a different seq_length
        lowerCamelCase_ = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )

        lowerCamelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) ,
            [num_patches, self.model_tester.embed_dim] ,
        )

        lowerCamelCase_ = outputs.reshaped_hidden_states
        self.assertEqual(len(A_ ) , A_ )

        lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = reshaped_hidden_states[0].shape
        lowerCamelCase_ = (
            reshaped_hidden_states[0].view(A_ , A_ , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) ,
            [num_patches, self.model_tester.embed_dim] ,
        )

    def a__ ( self : Optional[int] ) -> List[Any]:
        """simple docstring"""
        lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()

        lowerCamelCase_ = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            lowerCamelCase_ = True
            self.check_hidden_states_output(A_ , A_ , A_ , A_ )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase_ = True

            self.check_hidden_states_output(A_ , A_ , A_ , A_ )

    def a__ ( self : List[str] ) -> Dict:
        """simple docstring"""
        lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()

        lowerCamelCase_ = 3
        lowerCamelCase_ = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        lowerCamelCase_ = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )

        lowerCamelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        lowerCamelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            lowerCamelCase_ = True
            self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase_ = True
            self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) )

    @slow
    def a__ ( self : str ) -> Optional[Any]:
        """simple docstring"""
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ = FocalNetModel.from_pretrained(A_ )
            self.assertIsNotNone(A_ )

    def a__ ( self : List[Any] ) -> Tuple:
        """simple docstring"""
        lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase_ = _config_zero_init(A_ )
        for model_class in self.all_model_classes:
            lowerCamelCase_ = model_class(config=A_ )
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() ,
                        [0.0, 1.0] ,
                        msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,
                    )


@require_vision
@require_torch
class A( unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def a__ ( self : List[str] ) -> Optional[int]:
        """simple docstring"""
        return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None

    @slow
    def a__ ( self : Tuple ) -> Any:
        """simple docstring"""
        lowerCamelCase_ = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(A_ )
        lowerCamelCase_ = self.default_image_processor

        lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        lowerCamelCase_ = image_processor(images=A_ , return_tensors='pt' ).to(A_ )

        # forward pass
        with torch.no_grad():
            lowerCamelCase_ = model(**A_ )

        # verify the logits
        lowerCamelCase_ = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , A_ )
        lowerCamelCase_ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(A_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )


@require_torch
class A( UpperCamelCase , unittest.TestCase ):
    '''simple docstring'''
    UpperCamelCase = (FocalNetBackbone,) if is_torch_available() else ()
    UpperCamelCase = FocalNetConfig

    UpperCamelCase = False

    def a__ ( self : List[str] ) -> Tuple:
        """simple docstring"""
        lowerCamelCase_ = FocalNetModelTester(self )
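# A minimal, self-contained sketch (not part of the test suite above) of the
# reshape equivalence that check_hidden_states_output exercises: FocalNet exposes
# hidden states both as (batch, seq_len, dim) and as spatial maps
# (batch, dim, height, width); flattening the spatial map and swapping the last
# two axes recovers the sequence layout. Tensor sizes here are made up for
# illustration only.
import torch

batch_size, embed_dim, height, width = 2, 32, 8, 8
reshaped = torch.randn(batch_size, embed_dim, height, width)  # spatial form
sequence = reshaped.flatten(2).transpose(1, 2)                # (B, H*W, C)

# view(B, C, H*W) + permute(0, 2, 1), as in the test above, yields the same tensor
assert torch.equal(
    sequence, reshaped.view(batch_size, embed_dim, height * width).permute(0, 2, 1)
)
print(sequence.shape)  # torch.Size([2, 64, 32])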
651
0
from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)

# TODO Update this
lowerCamelCase : int = {
    """facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class A( UpperCamelCase ):
    '''simple docstring'''
    UpperCamelCase = "esm"

    def __init__( self : Optional[Any] , A_ : Optional[int]=None , A_ : Optional[Any]=None , A_ : Dict=None , A_ : Any=768 , A_ : List[str]=12 , A_ : Dict=12 , A_ : List[Any]=3072 , A_ : Any=0.1 , A_ : Tuple=0.1 , A_ : Any=1026 , A_ : Optional[Any]=0.02 , A_ : Optional[int]=1E-12 , A_ : List[str]="absolute" , A_ : int=True , A_ : Optional[int]=None , A_ : List[Any]=False , A_ : Optional[int]=False , A_ : Optional[Any]=None , A_ : Dict=None , **A_ : Optional[int] , ) -> Optional[int]:
        """simple docstring"""
        super().__init__(pad_token_id=A_ , mask_token_id=A_ , **A_ )

        lowerCamelCase_ = vocab_size
        lowerCamelCase_ = hidden_size
        lowerCamelCase_ = num_hidden_layers
        lowerCamelCase_ = num_attention_heads
        lowerCamelCase_ = intermediate_size
        lowerCamelCase_ = hidden_dropout_prob
        lowerCamelCase_ = attention_probs_dropout_prob
        lowerCamelCase_ = max_position_embeddings
        lowerCamelCase_ = initializer_range
        lowerCamelCase_ = layer_norm_eps
        lowerCamelCase_ = position_embedding_type
        lowerCamelCase_ = use_cache
        lowerCamelCase_ = emb_layer_norm_before
        lowerCamelCase_ = token_dropout
        lowerCamelCase_ = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('No esmfold_config supplied for folding model, using default values.' )
                lowerCamelCase_ = EsmFoldConfig()
            elif isinstance(A_ , A_ ):
                lowerCamelCase_ = EsmFoldConfig(**A_ )
            lowerCamelCase_ = esmfold_config
            if vocab_list is None:
                logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' )
                lowerCamelCase_ = get_default_vocab_list()
            else:
                lowerCamelCase_ = vocab_list
        else:
            lowerCamelCase_ = None
            lowerCamelCase_ = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , A_ ):
            raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' )

    def a__ ( self : Any ) -> List[str]:
        """simple docstring"""
        lowerCamelCase_ = super().to_dict()
        if isinstance(self.esmfold_config , A_ ):
            lowerCamelCase_ = self.esmfold_config.to_dict()
        return output


@dataclass
class A:
    '''simple docstring'''
    UpperCamelCase = None
    UpperCamelCase = True
    UpperCamelCase = False
    UpperCamelCase = False
    UpperCamelCase = False
    UpperCamelCase = 0
    UpperCamelCase = True
    UpperCamelCase = False
    UpperCamelCase = 128
    UpperCamelCase = None

    def a__ ( self : Any ) -> Optional[int]:
        """simple docstring"""
        if self.trunk is None:
            lowerCamelCase_ = TrunkConfig()
        elif isinstance(self.trunk , A_ ):
            lowerCamelCase_ = TrunkConfig(**self.trunk )

    def a__ ( self : List[str] ) -> Dict:
        """simple docstring"""
        lowerCamelCase_ = asdict(self )
        lowerCamelCase_ = self.trunk.to_dict()
        return output


@dataclass
class A:
    '''simple docstring'''
    UpperCamelCase = 48
    UpperCamelCase = 1024
    UpperCamelCase = 128
    UpperCamelCase = 32
    UpperCamelCase = 32
    UpperCamelCase = 32
    UpperCamelCase = 0
    UpperCamelCase = 0
    UpperCamelCase = False
    UpperCamelCase = 4
    UpperCamelCase = 128
    UpperCamelCase = None

    def a__ ( self : Optional[int] ) -> Union[str, Any]:
        """simple docstring"""
        if self.structure_module is None:
            lowerCamelCase_ = StructureModuleConfig()
        elif isinstance(self.structure_module , A_ ):
            lowerCamelCase_ = StructureModuleConfig(**self.structure_module )

        if self.max_recycles <= 0:
            raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'
                f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'
                f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )

        lowerCamelCase_ = self.sequence_state_dim // self.sequence_head_width
        lowerCamelCase_ = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got'
                f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got'
                f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )

        if self.dropout >= 0.4:
            raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )

    def a__ ( self : List[Any] ) -> List[Any]:
        """simple docstring"""
        lowerCamelCase_ = asdict(self )
        lowerCamelCase_ = self.structure_module.to_dict()
        return output


@dataclass
class A:
    '''simple docstring'''
    UpperCamelCase = 384
    UpperCamelCase = 128
    UpperCamelCase = 16
    UpperCamelCase = 128
    UpperCamelCase = 12
    UpperCamelCase = 4
    UpperCamelCase = 8
    UpperCamelCase = 0.1
    UpperCamelCase = 8
    UpperCamelCase = 1
    UpperCamelCase = 2
    UpperCamelCase = 7
    UpperCamelCase = 10
    UpperCamelCase = 1e-8
    UpperCamelCase = 1e5

    def a__ ( self : Any ) -> Optional[Any]:
        """simple docstring"""
        return asdict(self )


def _SCREAMING_SNAKE_CASE ( ):
    '''simple docstring'''
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
706
import unittest

from transformers import (
    MODEL_FOR_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_CAUSAL_LM_MAPPING,
    TextGenerationPipeline,
    logging,
    pipeline,
)
from transformers.testing_utils import (
    CaptureLogger,
    is_pipeline_test,
    require_accelerate,
    require_tf,
    require_torch,
    require_torch_gpu,
    require_torch_or_tf,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
class A( unittest.TestCase ):
    '''simple docstring'''
    UpperCamelCase = MODEL_FOR_CAUSAL_LM_MAPPING
    UpperCamelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING

    @require_torch
    def a__ ( self : Any ) -> Union[str, Any]:
        """simple docstring"""
        lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' )
        # Using `do_sample=False` to force deterministic output
        lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ )
        self.assertEqual(
            A_ ,
            [
                {
                    'generated_text': (
                        'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
                        ' oscope. FiliFili@@'
                    )
                }
            ] ,
        )

        lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] )
        self.assertEqual(
            A_ ,
            [
                [
                    {
                        'generated_text': (
                            'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
                            ' oscope. FiliFili@@'
                        )
                    }
                ],
                [
                    {
                        'generated_text': (
                            'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
                            ' oscope. oscope. FiliFili@@'
                        )
                    }
                ],
            ] ,
        )

        lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ , num_return_sequences=2 , return_tensors=A_ )
        self.assertEqual(
            A_ ,
            [
                {'generated_token_ids': ANY(A_ )},
                {'generated_token_ids': ANY(A_ )},
            ] ,
        )

        lowerCamelCase_ = text_generator.model.config.eos_token_id
        lowerCamelCase_ = '<pad>'
        lowerCamelCase_ = text_generator(
            ['This is a test', 'This is a second test'] ,
            do_sample=A_ ,
            num_return_sequences=2 ,
            batch_size=2 ,
            return_tensors=A_ ,
        )
        self.assertEqual(
            A_ ,
            [
                [
                    {'generated_token_ids': ANY(A_ )},
                    {'generated_token_ids': ANY(A_ )},
                ],
                [
                    {'generated_token_ids': ANY(A_ )},
                    {'generated_token_ids': ANY(A_ )},
                ],
            ] ,
        )

    @require_tf
    def a__ ( self : Optional[int] ) -> str:
        """simple docstring"""
        lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' )

        # Using `do_sample=False` to force deterministic output
        lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ )
        self.assertEqual(
            A_ ,
            [
                {
                    'generated_text': (
                        'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
                        ' please,'
                    )
                }
            ] ,
        )

        lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] , do_sample=A_ )
        self.assertEqual(
            A_ ,
            [
                [
                    {
                        'generated_text': (
                            'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
                            ' please,'
                        )
                    }
                ],
                [
                    {
                        'generated_text': (
                            'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
                            ' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
                        )
                    }
                ],
            ] ,
        )

    def a__ ( self : Optional[int] , A_ : Dict , A_ : int , A_ : List[str] ) -> str:
        """simple docstring"""
        lowerCamelCase_ = TextGenerationPipeline(model=A_ , tokenizer=A_ )
        return text_generator, ["This is a test", "Another test"]

    def a__ ( self : Dict ) -> str:
        """simple docstring"""
        lowerCamelCase_ = 'Hello I believe in'
        lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
        lowerCamelCase_ = text_generator(A_ )
        self.assertEqual(
            A_ ,
            [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] ,
        )

        lowerCamelCase_ = text_generator(A_ , stop_sequence=' fe' )
        self.assertEqual(A_ , [{'generated_text': 'Hello I believe in fe'}] )

    def a__ ( self : Any , A_ : Optional[Any] , A_ : Union[str, Any] ) -> Union[str, Any]:
        """simple docstring"""
        lowerCamelCase_ = text_generator.model
        lowerCamelCase_ = text_generator.tokenizer

        lowerCamelCase_ = text_generator('This is a test' )
        self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
        self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )

        lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ )
        self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
        self.assertNotIn('This is a test' , outputs[0]['generated_text'] )

        lowerCamelCase_ = pipeline(task='text-generation' , model=A_ , tokenizer=A_ , return_full_text=A_ )
        lowerCamelCase_ = text_generator('This is a test' )
        self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
        self.assertNotIn('This is a test' , outputs[0]['generated_text'] )

        lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ )
        self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
        self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )

        lowerCamelCase_ = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=A_ )
        self.assertEqual(
            A_ ,
            [
                [{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
                [{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
            ] ,
        )

        if text_generator.tokenizer.pad_token is not None:
            lowerCamelCase_ = text_generator(
                ['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=A_ )
            self.assertEqual(
                A_ ,
                [
                    [{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
                    [{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
                ] ,
            )

        with self.assertRaises(A_ ):
            lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_text=A_ )
        with self.assertRaises(A_ ):
            lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_tensors=A_ )
        with self.assertRaises(A_ ):
            lowerCamelCase_ = text_generator('test' , return_text=A_ , return_tensors=A_ )

        # Empty prompt is slightly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            lowerCamelCase_ = text_generator('' )
            self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
        else:
            with self.assertRaises((ValueError, AssertionError) ):
                lowerCamelCase_ = text_generator('' )

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        lowerCamelCase_ = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
                text_generator('This is a test' * 500 , max_new_tokens=20 )

            lowerCamelCase_ = text_generator('This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=20 )
            # Hole strategy cannot work
            with self.assertRaises(A_ ):
                text_generator(
                    'This is a test' * 500 ,
                    handle_long_generation='hole' ,
                    max_new_tokens=tokenizer.model_max_length + 10 ,
                )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def a__ ( self : Union[str, Any] ) -> Any:
        """simple docstring"""
        import torch

        # Classic `model_kwargs`
        lowerCamelCase_ = pipeline(
            model='hf-internal-testing/tiny-random-bloom' ,
            model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloataa} ,
        )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
        lowerCamelCase_ = pipe('This is a test' )
        self.assertEqual(
            A_ ,
            [
                {
                    'generated_text': (
                        'This is a test test test test test test test test test test test test test test test test'
                        ' test'
                    )
                }
            ] ,
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloataa )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
        lowerCamelCase_ = pipe('This is a test' )
        self.assertEqual(
            A_ ,
            [
                {
                    'generated_text': (
                        'This is a test test test test test test test test test test test test test test test test'
                        ' test'
                    )
                }
            ] ,
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
        lowerCamelCase_ = pipe('This is a test' )
        self.assertEqual(
            A_ ,
            [
                {
                    'generated_text': (
                        'This is a test test test test test test test test test test test test test test test test'
                        ' test'
                    )
                }
            ] ,
        )

    @require_torch
    @require_torch_gpu
    def a__ ( self : int ) -> str:
        """simple docstring"""
        import torch

        lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.floataa )
        pipe('This is a test' )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def a__ ( self : List[Any] ) -> Dict:
        """simple docstring"""
        import torch

        lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.floataa )
        pipe('This is a test' , do_sample=A_ , top_p=0.5 )

    def a__ ( self : Tuple ) -> Dict:
        """simple docstring"""
        lowerCamelCase_ = 'Hello world'
        lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
        if text_generator.model.framework == "tf":
            lowerCamelCase_ = logging.get_logger('transformers.generation.tf_utils' )
        else:
            lowerCamelCase_ = logging.get_logger('transformers.generation.utils' )
        lowerCamelCase_ = 'Both `max_new_tokens`'  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(A_ ) as cl:
            lowerCamelCase_ = text_generator(A_ , max_length=10 , max_new_tokens=1 )
        self.assertIn(A_ , cl.out )

        # The user only sets one -> no warning
        with CaptureLogger(A_ ) as cl:
            lowerCamelCase_ = text_generator(A_ , max_new_tokens=1 )
        self.assertNotIn(A_ , cl.out )

        with CaptureLogger(A_ ) as cl:
            lowerCamelCase_ = text_generator(A_ , max_length=10 )
        self.assertNotIn(A_ , cl.out )
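# Illustrative sketch (a hypothetical helper, not the transformers API) of the
# rule the warning test above exercises: when both `max_length` and
# `max_new_tokens` are given, `max_new_tokens` is assumed to take precedence and
# a warning mentioning both is emitted.
import warnings

def resolve_length(prompt_len: int, max_length=None, max_new_tokens=None) -> int:
    # warn when the caller over-specifies the generation length
    if max_length is not None and max_new_tokens is not None:
        warnings.warn("Both `max_new_tokens` and `max_length` were set; `max_new_tokens` takes precedence.")
    if max_new_tokens is not None:
        return prompt_len + max_new_tokens
    return max_length if max_length is not None else prompt_len + 20

print(resolve_length(2, max_length=10, max_new_tokens=1))  # 3, with a warning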
651
0
from __future__ import annotations


def _SCREAMING_SNAKE_CASE ( lowercase : str ):
    '''simple docstring'''
    return len(set(lowercase ) ) == len(lowercase )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
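# Usage sketch for the helper above: it reports whether every element of the
# input is unique, since building a set collapses duplicates.
print(_SCREAMING_SNAKE_CASE("abc"))   # True  -- all characters distinct
print(_SCREAMING_SNAKE_CASE("abca"))  # False -- 'a' repeats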
707
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


lowerCamelCase : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCamelCase : Tuple = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"


class A( unittest.TestCase ):
    '''simple docstring'''

    def a__ ( self : Union[str, Any] ) -> List[str]:
        """simple docstring"""
        lowerCamelCase_ = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
        lowerCamelCase_ = self.diffusers_dir
        shutil.copy(
            os.path.join(A_ , 'src/diffusers/schedulers/scheduling_ddpm.py' ) ,
            os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) ,
        )

    def a__ ( self : Tuple ) -> List[str]:
        """simple docstring"""
        lowerCamelCase_ = 'src/diffusers'
        shutil.rmtree(self.diffusers_dir )

    def a__ ( self : str , A_ : Optional[Any] , A_ : Optional[int] , A_ : str , A_ : Optional[Any]=None ) -> int:
        """simple docstring"""
        lowerCamelCase_ = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            lowerCamelCase_ = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        lowerCamelCase_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
        lowerCamelCase_ = black.format_str(A_ , mode=A_ )
        lowerCamelCase_ = os.path.join(self.diffusers_dir , 'new_code.py' )
        with open(A_ , 'w' , newline='\n' ) as f:
            f.write(A_ )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(A_ ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=A_ )
            with open(A_ , 'r' ) as f:
                self.assertEqual(f.read() , A_ )

    def a__ ( self : Optional[int] ) -> Dict:
        """simple docstring"""
        lowerCamelCase_ = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
        self.assertEqual(A_ , A_ )

    def a__ ( self : Any ) -> Dict:
        """simple docstring"""
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' ,
            'DDPMSchedulerOutput' ,
            REFERENCE_CODE + '\n' ,
        )

        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' ,
            'DDPMSchedulerOutput' ,
            A_ ,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' ,
            'TestSchedulerOutput' ,
            re.sub('DDPM' , 'Test' , A_ ) ,
        )

        # Copy consistency with a really long name
        lowerCamelCase_ = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" ,
            f"""{long_class_name}SchedulerOutput""" ,
            re.sub('Bert' , A_ , A_ ) ,
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' ,
            'TestSchedulerOutput' ,
            A_ ,
            overwrite_result=re.sub('DDPM' , 'Test' , A_ ) ,
        )
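# Minimal sketch of the rename mechanism the copy checker above relies on: a
# "# Copied from ... with DDPM->Test" directive roughly amounts to a plain
# string substitution on the reference code before comparison. The reference
# string here is a made-up stand-in.
import re

reference = "class DDPMSchedulerOutput:\n    prev_sample: 'torch.FloatTensor'\n"
renamed = re.sub("DDPM", "Test", reference)
print(renamed.splitlines()[0])  # class TestSchedulerOutput: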
651
0