code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class A_ (unittest.TestCase ): def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = 0 def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' ) self.assertIsInstance(_A , _A ) def _lowercase ( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase = Path(_A ) / '''preprocessor_config.json''' UpperCAmelCase = Path(_A ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_A , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(_A , '''w''' ) ) UpperCAmelCase = AutoImageProcessor.from_pretrained(_A ) self.assertIsInstance(_A , _A ) def _lowercase ( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase = Path(_A ) / '''preprocessor_config.json''' UpperCAmelCase = Path(_A ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_A , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(_A , '''w''' ) ) UpperCAmelCase = AutoImageProcessor.from_pretrained(_A ) self.assertIsInstance(_A , _A ) def _lowercase ( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase = CLIPConfig() # Create a dummy config file with image_proceesor_type UpperCAmelCase = Path(_A ) / 
'''preprocessor_config.json''' UpperCAmelCase = Path(_A ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_A , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(_A , '''w''' ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally UpperCAmelCase = AutoImageProcessor.from_pretrained(_A ).to_dict() config_dict.pop('''image_processor_type''' ) UpperCAmelCase = CLIPImageProcessor(**_A ) # save in new folder model_config.save_pretrained(_A ) config.save_pretrained(_A ) UpperCAmelCase = AutoImageProcessor.from_pretrained(_A ) # make sure private variable is not incorrectly saved UpperCAmelCase = json.loads(config.to_json_string() ) self.assertTrue('''_processor_class''' not in dict_as_saved ) self.assertIsInstance(_A , _A ) def _lowercase ( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase = Path(_A ) / '''preprocessor_config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_A , '''w''' ) , ) UpperCAmelCase = AutoImageProcessor.from_pretrained(_A ) self.assertIsInstance(_A , _A ) def _lowercase ( self ): '''simple docstring''' with self.assertRaisesRegex( _A , '''clip-base is not a local folder and is not a valid model identifier''' ): UpperCAmelCase = AutoImageProcessor.from_pretrained('''clip-base''' ) def _lowercase ( self ): '''simple docstring''' with self.assertRaisesRegex( _A , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): UpperCAmelCase = AutoImageProcessor.from_pretrained(_A , revision='''aaaaaa''' ) def _lowercase ( self ): '''simple docstring''' with self.assertRaisesRegex( _A , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ): UpperCAmelCase = 
AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' ) def _lowercase ( self ): '''simple docstring''' with self.assertRaises(_A ): UpperCAmelCase = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(_A ): UpperCAmelCase = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_A ) UpperCAmelCase = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_A ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) # Test image processor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(_A ) UpperCAmelCase = AutoImageProcessor.from_pretrained(_A , trust_remote_code=_A ) self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' ) def _lowercase ( self ): '''simple docstring''' try: AutoConfig.register('''custom''' , _A ) AutoImageProcessor.register(_A , _A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_A ): AutoImageProcessor.register(_A , _A ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase = Path(_A ) / '''preprocessor_config.json''' UpperCAmelCase = Path(_A ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_A , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(_A , '''w''' ) ) UpperCAmelCase = CustomImageProcessor.from_pretrained(_A ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(_A ) UpperCAmelCase = AutoImageProcessor.from_pretrained(_A ) self.assertIsInstance(_A , _A ) finally: if "custom" in 
CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def _lowercase ( self ): '''simple docstring''' class A_ (a_ ): UpperCAmelCase__ = True try: AutoConfig.register('''custom''' , _A ) AutoImageProcessor.register(_A , _A ) # If remote code is not set, the default is to use local UpperCAmelCase = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. UpperCAmelCase = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_A ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub UpperCAmelCase = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_A ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(not hasattr(_A , '''is_local''' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
273
import cva import numpy as np class A_ : def __init__( self , _A , _A ): '''simple docstring''' if k in (0.04, 0.06): UpperCAmelCase = k UpperCAmelCase = window_size else: raise ValueError('''invalid k value''' ) def __str__( self ): '''simple docstring''' return str(self.k ) def _lowercase ( self , _A ): '''simple docstring''' UpperCAmelCase = cva.imread(_A , 0 ) UpperCAmelCase , UpperCAmelCase = img.shape UpperCAmelCase = [] UpperCAmelCase = img.copy() UpperCAmelCase = cva.cvtColor(_A , cva.COLOR_GRAY2RGB ) UpperCAmelCase , UpperCAmelCase = np.gradient(_A ) UpperCAmelCase = dx**2 UpperCAmelCase = dy**2 UpperCAmelCase = dx * dy UpperCAmelCase = 0.04 UpperCAmelCase = self.window_size // 2 for y in range(_A , h - offset ): for x in range(_A , w - offset ): UpperCAmelCase = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() UpperCAmelCase = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() UpperCAmelCase = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() UpperCAmelCase = (wxx * wyy) - (wxy**2) UpperCAmelCase = wxx + wyy UpperCAmelCase = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 2_5_5 ) return color_img, corner_list if __name__ == "__main__": __A : Tuple = HarrisCorner(0.04, 3) __A , __A : List[Any] = edge_detect.detect("path_to_image") cva.imwrite("detect.png", color_img)
273
1
import itertools from dataclasses import dataclass from typing import Optional import pandas as pd import pyarrow as pa import datasets from datasets.table import table_cast @dataclass class A_ (datasets.BuilderConfig ): UpperCAmelCase__ = None class A_ (datasets.ArrowBasedBuilder ): UpperCAmelCase__ = PandasConfig def _lowercase ( self ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def _lowercase ( self , _A ): '''simple docstring''' if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) UpperCAmelCase = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_A , (str, list, tuple) ): UpperCAmelCase = data_files if isinstance(_A , _A ): UpperCAmelCase = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive UpperCAmelCase = [dl_manager.iter_files(_A ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] UpperCAmelCase = [] for split_name, files in data_files.items(): if isinstance(_A , _A ): UpperCAmelCase = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive UpperCAmelCase = [dl_manager.iter_files(_A ) for file in files] splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'''files''': files} ) ) return splits def _lowercase ( self , _A ): '''simple docstring''' if self.config.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example UpperCAmelCase = table_cast(_A , self.config.features.arrow_schema ) return pa_table def _lowercase ( self , _A ): '''simple docstring''' for i, file in enumerate(itertools.chain.from_iterable(_A ) ): with open(_A , '''rb''' ) as f: UpperCAmelCase = pa.Table.from_pandas(pd.read_pickle(_A ) ) yield i, self._cast_table(_A )
273
from datetime import datetime import requests def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> bytes: '''simple docstring''' UpperCAmelCase = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url=''' UpperCAmelCase = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src'''] return requests.get(UpperCamelCase__ ).content if __name__ == "__main__": __A : Union[str, Any] = input("Enter Video/IGTV url: ").strip() __A : Tuple = F'{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4' with open(file_name, "wb") as fp: fp.write(download_video(url)) print(F'Done. Video saved to disk as {file_name}.')
273
1
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class A_ : @property def _lowercase ( self ): '''simple docstring''' return self.get_dummy_input() @property def _lowercase ( self ): '''simple docstring''' if self.block_type == "down": return (4, 3_2, 1_6, 1_6) elif self.block_type == "mid": return (4, 3_2, 3_2, 3_2) elif self.block_type == "up": return (4, 3_2, 6_4, 6_4) raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" ) def _lowercase ( self , _A=True , _A=False , _A=False , _A=False , ): '''simple docstring''' UpperCAmelCase = 4 UpperCAmelCase = 3_2 UpperCAmelCase = (3_2, 3_2) UpperCAmelCase = torch.manual_seed(0 ) UpperCAmelCase = torch.device(_A ) UpperCAmelCase = (batch_size, num_channels) + sizes UpperCAmelCase = randn_tensor(_A , generator=_A , device=_A ) UpperCAmelCase = {'''hidden_states''': hidden_states} if include_temb: UpperCAmelCase = 1_2_8 UpperCAmelCase = randn_tensor((batch_size, temb_channels) , generator=_A , device=_A ) if include_res_hidden_states_tuple: UpperCAmelCase = torch.manual_seed(1 ) UpperCAmelCase = (randn_tensor(_A , generator=_A , device=_A ),) if include_encoder_hidden_states: UpperCAmelCase = floats_tensor((batch_size, 3_2, 3_2) ).to(_A ) if include_skip_sample: UpperCAmelCase = randn_tensor(((batch_size, 3) + sizes) , generator=_A , device=_A ) return dummy_input def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = { '''in_channels''': 3_2, '''out_channels''': 3_2, '''temb_channels''': 1_2_8, } if self.block_type == "up": UpperCAmelCase = 3_2 if self.block_type == "mid": init_dict.pop('''out_channels''' ) UpperCAmelCase = self.dummy_input return init_dict, inputs_dict def _lowercase ( self , _A ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase = 
self.prepare_init_args_and_inputs_for_common() UpperCAmelCase = self.block_class(**_A ) unet_block.to(_A ) unet_block.eval() with torch.no_grad(): UpperCAmelCase = unet_block(**_A ) if isinstance(_A , _A ): UpperCAmelCase = output[0] self.assertEqual(output.shape , self.output_shape ) UpperCAmelCase = output[0, -1, -3:, -3:] UpperCAmelCase = torch.tensor(_A ).to(_A ) assert torch_all_close(output_slice.flatten() , _A , atol=5E-3 ) @unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase = self.prepare_init_args_and_inputs_for_common() UpperCAmelCase = self.block_class(**_A ) model.to(_A ) model.train() UpperCAmelCase = model(**_A ) if isinstance(_A , _A ): UpperCAmelCase = output[0] UpperCAmelCase = torch.device(_A ) UpperCAmelCase = randn_tensor(output.shape , device=_A ) UpperCAmelCase = torch.nn.functional.mse_loss(_A , _A ) loss.backward()
273
from __future__ import annotations from collections.abc import Callable def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 100 , ) -> float: '''simple docstring''' UpperCAmelCase = x_start UpperCAmelCase = fnc(UpperCamelCase__ ) UpperCAmelCase = 0.0 for _ in range(UpperCamelCase__ ): # Approximates small segments of curve as linear and solve # for trapezoidal area UpperCAmelCase = (x_end - x_start) / steps + xa UpperCAmelCase = fnc(UpperCamelCase__ ) area += abs(fxa + fxa ) * (xa - xa) / 2 # Increment step UpperCAmelCase = xa UpperCAmelCase = fxa return area if __name__ == "__main__": def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> str: '''simple docstring''' return x**3 + x**2 print("f(x) = x^3 + x^2") print("The area between the curve, x = -5, x = 5 and the x axis is:") __A : List[Any] = 10 while i <= 100_000: print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}') i *= 10
273
1
import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu __A : Union[str, Any] = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json" with io.open(filename, "r", encoding="utf-8") as f: __A : Tuple = json.load(f) @require_torch class A_ (unittest.TestCase ): def _lowercase ( self , _A ): '''simple docstring''' return FSMTTokenizer.from_pretrained(_A ) def _lowercase ( self , _A ): '''simple docstring''' UpperCAmelCase = FSMTForConditionalGeneration.from_pretrained(_A ).to(_A ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ['''en-ru''', 26.0], ['''ru-en''', 22.0], ['''en-de''', 22.0], ['''de-en''', 29.0], ] ) @slow def _lowercase ( self , _A , _A ): '''simple docstring''' UpperCAmelCase = F"""facebook/wmt19-{pair}""" UpperCAmelCase = self.get_tokenizer(_A ) UpperCAmelCase = self.get_model(_A ) UpperCAmelCase = bleu_data[pair]['''src'''] UpperCAmelCase = bleu_data[pair]['''tgt'''] UpperCAmelCase = tokenizer(_A , return_tensors='''pt''' , truncation=_A , padding='''longest''' ).to(_A ) UpperCAmelCase = model.generate( input_ids=batch.input_ids , num_beams=8 , ) UpperCAmelCase = tokenizer.batch_decode( _A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A ) UpperCAmelCase = calculate_bleu(_A , _A ) print(_A ) self.assertGreaterEqual(scores['''bleu'''] , _A )
273
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer __A : Dict = logging.get_logger(__name__) __A : Any = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __A : Tuple = { "vocab_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt" ), "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt", "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt" ), }, "tokenizer_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli": ( "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json" ), }, } __A : List[Any] = { "squeezebert/squeezebert-uncased": 512, "squeezebert/squeezebert-mnli": 512, "squeezebert/squeezebert-mnli-headless": 512, } __A : List[Any] = { "squeezebert/squeezebert-uncased": {"do_lower_case": True}, "squeezebert/squeezebert-mnli": {"do_lower_case": True}, "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True}, } class A_ (a_ ): UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = PRETRAINED_INIT_CONFIGURATION UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = SqueezeBertTokenizer def __init__( self , _A=None , _A=None , _A=True , _A="[UNK]" , _A="[SEP]" , _A="[PAD]" , _A="[CLS]" , _A="[MASK]" , _A=True , _A=None , **_A , ): '''simple docstring''' super().__init__( _A , tokenizer_file=_A , do_lower_case=_A , 
unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , ) UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , _A ) != do_lower_case or normalizer_state.get('''strip_accents''' , _A ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , _A ) != tokenize_chinese_chars ): UpperCAmelCase = getattr(_A , normalizer_state.pop('''type''' ) ) UpperCAmelCase = do_lower_case UpperCAmelCase = strip_accents UpperCAmelCase = tokenize_chinese_chars UpperCAmelCase = normalizer_class(**_A ) UpperCAmelCase = do_lower_case def _lowercase ( self , _A , _A=None ): '''simple docstring''' UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _lowercase ( self , _A , _A = None ): '''simple docstring''' UpperCAmelCase = [self.sep_token_id] UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowercase ( self , _A , _A = None ): '''simple docstring''' UpperCAmelCase = self._tokenizer.model.save(_A , name=_A ) return tuple(_A )
273
1
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class A_ (unittest.TestCase ): @slow def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' ) UpperCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] ) # The dog is cute and lives in the garden house UpperCAmelCase = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim UpperCAmelCase = torch.tensor( [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): UpperCAmelCase = model(_A )['''last_hidden_state'''].detach() self.assertEqual(output.shape , _A ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _A , atol=1E-3 ) ) @slow def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' ) UpperCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] ) # The dog is cute and lives in the garden house UpperCAmelCase = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim UpperCAmelCase = torch.tensor( [[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): 
UpperCAmelCase = model(_A )['''last_hidden_state'''].detach() self.assertEqual(output.shape , _A ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _A , atol=1E-3 ) )
273
import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument __A : int = { "/attention/": "/0/SelfAttention/", "/self_attention/": "/0/SelfAttention/", "/encoder_decoder_attention/": "/1/EncDecAttention/", "value": "v", "query": "q", "key": "k", "out": "o", "pre_self_attention_layer_norm": "0/layer_norm", "pre_cross_attention_layer_norm": "1/layer_norm", "pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong "token_embedder": "shared", "encoder_norm": "final_layer_norm", "decoder_norm": "final_layer_norm", "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight", "router/router_weights/w/": "router/classifier/", "roer/roer_weights/w/": "router/classifier/", "logits_dense": "lm_head", } def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> List[str]: '''simple docstring''' UpperCAmelCase = list(s_dict.keys() ) for key in keys: UpperCAmelCase = R'''.*/layers_(\d+)''' UpperCAmelCase = key if re.match(UpperCamelCase__ , UpperCamelCase__ ): UpperCAmelCase = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , UpperCamelCase__ ) UpperCAmelCase = R'''(encoder|decoder)\/''' if re.match(UpperCamelCase__ , UpperCamelCase__ ): UpperCAmelCase = re.match(UpperCamelCase__ , UpperCamelCase__ ).groups() if groups[0] == "encoder": UpperCAmelCase = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , UpperCamelCase__ ) UpperCAmelCase = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , UpperCamelCase__ ) elif groups[0] == "decoder": UpperCAmelCase = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , UpperCamelCase__ ) UpperCAmelCase = re.sub(R'''/pre_mlp_layer_norm/''' , 
R'''/2/layer_norm/''' , UpperCamelCase__ ) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: UpperCAmelCase = new_key.replace(UpperCamelCase__ , UpperCamelCase__ ) print(F"""{key} -> {new_key}""" ) UpperCAmelCase = s_dict.pop(UpperCamelCase__ ) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: UpperCAmelCase = s_dict[ '''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight''' ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: UpperCAmelCase = s_dict[ '''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight''' ].T # 3. Take extra care of the EXPERTS layer for key in list(s_dict.keys() ): if "expert" in key: UpperCAmelCase = s_dict[key].shape[0] UpperCAmelCase = s_dict[key] for idx in range(UpperCamelCase__ ): UpperCAmelCase = expert_weihts[idx] print(F"""{key} -> {key.replace("expert/" , "nested fstring" )}""" ) s_dict.pop(UpperCamelCase__ ) return s_dict __A : Optional[int] = { "NUM_ENCODER_LAYERS": "num_layers", "NUM_DECODER_LAYERS": "num_decoder_layers", "NUM_HEADS": "num_heads", "HEAD_DIM": "d_kv", "EMBED_DIM": "d_model", "MLP_DIM": "d_ff", "NUM_SELECTED_EXPERTS": "num_selected_experts", "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers", "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers", "dense.MlpBlock.activations": "feed_forward_proj", } def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> List[str]: '''simple docstring''' import regex as re with open(UpperCamelCase__ , '''r''' ) as f: UpperCAmelCase = f.read() UpperCAmelCase = re.findall(R'''(.*) = ([0-9.]*)''' , UpperCamelCase__ ) UpperCAmelCase = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": UpperCAmelCase = float(UpperCamelCase__ ) if '''.''' in value else int(UpperCamelCase__ ) UpperCAmelCase = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , UpperCamelCase__ )[0] 
UpperCAmelCase = str(activation[1] ) UpperCAmelCase = num_experts UpperCAmelCase = SwitchTransformersConfig(**UpperCamelCase__ ) return config def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__="./" , UpperCamelCase__=8 ) -> List[Any]: '''simple docstring''' print(F"""Loading flax weights from : {flax_checkpoint_path}""" ) UpperCAmelCase = checkpoints.load_tax_checkpoint(UpperCamelCase__ ) if gin_file is not None: UpperCAmelCase = convert_gin_to_config(UpperCamelCase__ , UpperCamelCase__ ) else: UpperCAmelCase = SwitchTransformersConfig.from_pretrained(UpperCamelCase__ ) UpperCAmelCase = SwitchTransformersForConditionalGeneration(UpperCamelCase__ ) UpperCAmelCase = flax_params['''target'''] UpperCAmelCase = flatten_dict(UpperCamelCase__ , sep='''/''' ) UpperCAmelCase = rename_keys(UpperCamelCase__ ) UpperCAmelCase = unflatten_dict(UpperCamelCase__ , sep='''/''' ) # Load the flax params in the PT model load_flax_weights_in_pytorch_model(UpperCamelCase__ , UpperCamelCase__ ) print(F"""Save PyTorch model to {pytorch_dump_path}""" ) pt_model.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": __A : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--switch_t5x_checkpoint_path", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the" " model architecture. If not provided, a `gin_file` has to be provided." ), ) parser.add_argument( "--gin_file", default=None, type=str, required=False, help="Path to the gin config file. If not provided, a `config_file` has to be passed ", ) parser.add_argument( "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model." 
) parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts") __A : Tuple = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_tax_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
273
1
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A_ : def __init__( self , _A , _A=1_3 , _A=3_2 , _A=3 , _A=4 , _A=[1_0, 2_0, 3_0, 4_0] , _A=[2, 2, 3, 2] , _A=True , _A=True , _A=3_7 , _A="gelu" , _A=1_0 , _A=0.02 , _A=["stage2", "stage3", "stage4"] , _A=3 , _A=None , ): '''simple docstring''' UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = image_size UpperCAmelCase = num_channels UpperCAmelCase = num_stages UpperCAmelCase = hidden_sizes UpperCAmelCase = depths UpperCAmelCase = is_training UpperCAmelCase = use_labels UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = type_sequence_label_size UpperCAmelCase = initializer_range UpperCAmelCase = out_features UpperCAmelCase = num_labels UpperCAmelCase = scope UpperCAmelCase = num_stages def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase = None if self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase = self.get_config() return config, pixel_values, labels def _lowercase ( self ): '''simple docstring''' return ConvNextConfig( 
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , ) def _lowercase ( self ): '''simple docstring''' return UperNetConfig( backbone_config=self.get_backbone_config() , hidden_size=5_1_2 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_A , auxiliary_loss_weight=0.4 , auxiliary_in_channels=4_0 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=_A , loss_ignore_index=2_5_5 , num_labels=self.num_labels , ) def _lowercase ( self , _A , _A , _A ): '''simple docstring''' UpperCAmelCase = UperNetForSemanticSegmentation(config=_A ) model.to(_A ) model.eval() UpperCAmelCase = model(_A ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) = config_and_inputs UpperCAmelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class A_ (a_ , a_ , unittest.TestCase ): UpperCAmelCase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else () UpperCAmelCase__ = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {} UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = UperNetModelTester(self ) UpperCAmelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 ) def _lowercase ( self ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() 
self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowercase ( self ): '''simple docstring''' return def _lowercase ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase = model_class(_A ) UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase = [*signature.parameters.keys()] UpperCAmelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _A ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_A ) @unittest.skip(reason='''UperNet does not use inputs_embeds''' ) def _lowercase ( self ): '''simple docstring''' pass @unittest.skip(reason='''UperNet does not support input and output embeddings''' ) def _lowercase ( self ): '''simple docstring''' pass @unittest.skip(reason='''UperNet does not have a base model''' ) def _lowercase ( self ): '''simple docstring''' pass @unittest.skip(reason='''UperNet does not have a base model''' ) def _lowercase ( self ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def _lowercase ( self ): '''simple docstring''' pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def _lowercase ( self ): '''simple docstring''' pass def _lowercase ( self ): '''simple docstring''' def check_hidden_states_output(_A , _A , _A ): UpperCAmelCase = model_class(_A ) model.to(_A ) model.eval() with torch.no_grad(): UpperCAmelCase = 
model(**self._prepare_for_class(_A , _A ) ) UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase = self.model_tester.num_stages self.assertEqual(len(_A ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase = True check_hidden_states_output(_A , _A , _A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase = True check_hidden_states_output(_A , _A , _A ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase = _config_zero_init(_A ) UpperCAmelCase = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: UpperCAmelCase = model_class(config=_A ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip(reason='''UperNet does not have tied weights''' ) def _lowercase ( self ): '''simple docstring''' pass @slow def _lowercase ( self ): '''simple docstring''' for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase = UperNetForSemanticSegmentation.from_pretrained(_A ) self.assertIsNotNone(_A ) def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]: '''simple docstring''' UpperCAmelCase = hf_hub_download( repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' ) UpperCAmelCase = Image.open(UpperCamelCase__ 
).convert('''RGB''' ) return image @require_torch @require_vision @slow class A_ (unittest.TestCase ): def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' ) UpperCAmelCase = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(_A ) UpperCAmelCase = prepare_img() UpperCAmelCase = processor(images=_A , return_tensors='''pt''' ).to(_A ) with torch.no_grad(): UpperCAmelCase = model(**_A ) UpperCAmelCase = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) ) self.assertEqual(outputs.logits.shape , _A ) UpperCAmelCase = torch.tensor( [[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] ).to(_A ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1E-4 ) ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' ) UpperCAmelCase = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(_A ) UpperCAmelCase = prepare_img() UpperCAmelCase = processor(images=_A , return_tensors='''pt''' ).to(_A ) with torch.no_grad(): UpperCAmelCase = model(**_A ) UpperCAmelCase = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) ) self.assertEqual(outputs.logits.shape , _A ) UpperCAmelCase = torch.tensor( [[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] ).to(_A ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1E-4 ) )
273
import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class A_ : def _lowercase ( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) UpperCAmelCase = UNetaDConditionModel( sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) UpperCAmelCase = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , ) torch.manual_seed(0 ) UpperCAmelCase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def _lowercase ( self ): '''simple docstring''' torch.manual_seed(0 ) 
UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) UpperCAmelCase = UNetaDConditionModel( sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='''gelu''' , time_embedding_dim=3_2 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) UpperCAmelCase = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , ) torch.manual_seed(0 ) UpperCAmelCase = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , ) torch.manual_seed(0 ) UpperCAmelCase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.get_dummy_components() UpperCAmelCase = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) 
UpperCAmelCase = self.get_dummy_inputs(_A ) UpperCAmelCase = inputs['''prompt'''] UpperCAmelCase = inputs['''generator'''] UpperCAmelCase = inputs['''num_inference_steps'''] UpperCAmelCase = inputs['''output_type'''] if "image" in inputs: UpperCAmelCase = inputs['''image'''] else: UpperCAmelCase = None if "mask_image" in inputs: UpperCAmelCase = inputs['''mask_image'''] else: UpperCAmelCase = None if "original_image" in inputs: UpperCAmelCase = inputs['''original_image'''] else: UpperCAmelCase = None UpperCAmelCase , UpperCAmelCase = pipe.encode_prompt(_A ) # inputs with prompt converted to embeddings UpperCAmelCase = { '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''': num_inference_steps, '''output_type''': output_type, } if image is not None: UpperCAmelCase = image if mask_image is not None: UpperCAmelCase = mask_image if original_image is not None: UpperCAmelCase = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(_A , _A , _A ) UpperCAmelCase = pipe(**_A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_A ) UpperCAmelCase = self.pipeline_class.from_pretrained(_A ) pipe_loaded.to(_A ) pipe_loaded.set_progress_bar_config(disable=_A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(_A , _A ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , ) UpperCAmelCase = self.get_dummy_inputs(_A ) UpperCAmelCase = inputs['''generator'''] UpperCAmelCase = inputs['''num_inference_steps'''] UpperCAmelCase = inputs['''output_type'''] # inputs with prompt converted to embeddings UpperCAmelCase = { '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''': 
num_inference_steps, '''output_type''': output_type, } if image is not None: UpperCAmelCase = image if mask_image is not None: UpperCAmelCase = mask_image if original_image is not None: UpperCAmelCase = original_image UpperCAmelCase = pipe_loaded(**_A )[0] UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max() self.assertLess(_A , 1E-4 ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.get_dummy_components() UpperCAmelCase = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase = self.get_dummy_inputs(_A ) UpperCAmelCase = pipe(**_A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_A ) UpperCAmelCase = self.pipeline_class.from_pretrained(_A ) pipe_loaded.to(_A ) pipe_loaded.set_progress_bar_config(disable=_A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests UpperCAmelCase = self.get_dummy_inputs(_A ) UpperCAmelCase = pipe_loaded(**_A )[0] UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max() self.assertLess(_A , 1E-4 )
273
1
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> str:
    """Return the longest palindromic substring of the input string.

    Implements Manacher's algorithm: the input is interleaved with ``|``
    separators so odd- and even-length palindromes are handled uniformly,
    then the palindromic radius around every centre is computed in amortised
    O(n) by seeding each centre from its mirror inside the right-most
    palindrome found so far.

    Args:
        UpperCamelCase__: the string to search (may be empty).

    Returns:
        The longest palindromic substring; ``""`` for an empty input.
    """
    # Fix: the previous version indexed input_string[-1] unconditionally and
    # raised IndexError for an empty string; it also collapsed the distinct
    # bookkeeping variables (length[j], max_length, start, l, r) into a single
    # clobbered name, so it could never produce a correct answer.
    if not UpperCamelCase__:
        return ""

    # "aba" -> "a|b|a": every palindrome in the separated string has odd length.
    separated = "|".join(UpperCamelCase__)
    n = len(separated)

    # radius_len[j] is the length of the longest palindrome centred at j.
    radius_len = [1] * n
    left = right = 0  # bounds of the right-most palindrome seen so far
    best_len = 0
    best_center = 0

    for j in range(n):
        # Seed the half-width from the mirror centre when j lies in [left, right].
        k = 1 if j > right else min(radius_len[left + right - j] // 2, right - j + 1)
        # Expand around centre j as far as the characters keep matching.
        while j - k >= 0 and j + k < n and separated[j + k] == separated[j - k]:
            k += 1
        radius_len[j] = 2 * k - 1
        # Push the right-most palindrome bounds forward if this one reaches further.
        if j + k - 1 > right:
            left = j - k + 1
            right = j + k - 1
        if radius_len[j] > best_len:
            best_len = radius_len[j]
            best_center = j

    window = separated[best_center - best_len // 2 : best_center + best_len // 2 + 1]
    # Drop the artificial separators to recover the answer in the original alphabet.
    return "".join(ch for ch in window if ch != "|")


# Readable public alias; the mangled name is kept for backward compatibility
# (note: leading-underscore names are skipped by `from module import *`).
longest_palindromic_substring = __SCREAMING_SNAKE_CASE


if __name__ == "__main__":
    import doctest

    doctest.testmod()
273
from __future__ import annotations from collections import namedtuple def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> tuple: '''simple docstring''' UpperCAmelCase = namedtuple('''result''' , '''name value''' ) if (voltage, current, power).count(0 ) != 1: raise ValueError('''Only one argument must be 0''' ) elif power < 0: raise ValueError( '''Power cannot be negative in any electrical/electronics system''' ) elif voltage == 0: return result('''voltage''' , power / current ) elif current == 0: return result('''current''' , power / voltage ) elif power == 0: return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
273
1
import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__=None ) -> List[Any]: '''simple docstring''' UpperCAmelCase = None if token is not None: UpperCAmelCase = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F"""Bearer {token}"""} UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100""" UpperCAmelCase = requests.get(UpperCamelCase__ , headers=UpperCamelCase__ ).json() UpperCAmelCase = {} try: job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) UpperCAmelCase = math.ceil((result['''total_count'''] - 100) / 100 ) for i in range(UpperCamelCase__ ): UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" , headers=UpperCamelCase__ ).json() job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) return job_links except Exception: print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" ) return {} def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__=None ) -> Tuple: '''simple docstring''' UpperCAmelCase = None if token is not None: UpperCAmelCase = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F"""Bearer {token}"""} UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100""" UpperCAmelCase = requests.get(UpperCamelCase__ , headers=UpperCamelCase__ ).json() UpperCAmelCase = {} try: artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} ) UpperCAmelCase = math.ceil((result['''total_count'''] - 100) / 100 ) for i in range(UpperCamelCase__ ): UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" , headers=UpperCamelCase__ ).json() artifacts.update({artifact['''name''']: 
artifact['''archive_download_url'''] for artifact in result['''artifacts''']} ) return artifacts except Exception: print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" ) return {} def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple: '''simple docstring''' UpperCAmelCase = None if token is not None: UpperCAmelCase = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F"""Bearer {token}"""} UpperCAmelCase = requests.get(UpperCamelCase__ , headers=UpperCamelCase__ , allow_redirects=UpperCamelCase__ ) UpperCAmelCase = result.headers['''Location'''] UpperCAmelCase = requests.get(UpperCamelCase__ , allow_redirects=UpperCamelCase__ ) UpperCAmelCase = os.path.join(UpperCamelCase__ , F"""{artifact_name}.zip""" ) with open(UpperCamelCase__ , '''wb''' ) as fp: fp.write(response.content ) def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__=None ) -> int: '''simple docstring''' UpperCAmelCase = [] UpperCAmelCase = [] UpperCAmelCase = None with zipfile.ZipFile(UpperCamelCase__ ) as z: for filename in z.namelist(): if not os.path.isdir(UpperCamelCase__ ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(UpperCamelCase__ ) as f: for line in f: UpperCAmelCase = line.decode('''UTF-8''' ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs UpperCAmelCase = line[: line.index(''': ''' )] UpperCAmelCase = line[line.index(''': ''' ) + len(''': ''' ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith('''FAILED ''' ): # `test` is the test method that failed UpperCAmelCase = line[len('''FAILED ''' ) :] failed_tests.append(UpperCamelCase__ ) elif filename == "job_name.txt": UpperCAmelCase = line if len(UpperCamelCase__ ) != len(UpperCamelCase__ ): raise ValueError( F"""`errors` and 
`failed_tests` should have the same number of elements. Got {len(UpperCamelCase__ )} for `errors` """ F"""and {len(UpperCamelCase__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some""" ''' problem.''' ) UpperCAmelCase = None if job_name and job_links: UpperCAmelCase = job_links.get(UpperCamelCase__ , UpperCamelCase__ ) # A list with elements of the form (line of error, error, failed test) UpperCAmelCase = [x + [y] + [job_link] for x, y in zip(UpperCamelCase__ , UpperCamelCase__ )] return result def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__=None ) -> int: '''simple docstring''' UpperCAmelCase = [] UpperCAmelCase = [os.path.join(UpperCamelCase__ , UpperCamelCase__ ) for p in os.listdir(UpperCamelCase__ ) if p.endswith('''.zip''' )] for p in paths: errors.extend(get_errors_from_single_artifact(UpperCamelCase__ , job_links=UpperCamelCase__ ) ) return errors def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__=None ) -> str: '''simple docstring''' UpperCAmelCase = Counter() counter.update([x[1] for x in logs] ) UpperCAmelCase = counter.most_common() UpperCAmelCase = {} for error, count in counts: if error_filter is None or error not in error_filter: UpperCAmelCase = {'''count''': count, '''failed_tests''': [(x[2], x[0]) for x in logs if x[1] == error]} UpperCAmelCase = dict(sorted(r.items() , key=lambda UpperCamelCase__ : item[1]["count"] , reverse=UpperCamelCase__ ) ) return r def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase = test.split('''::''' )[0] if test.startswith('''tests/models/''' ): UpperCAmelCase = test.split('''/''' )[2] else: UpperCAmelCase = None return test def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__=None ) -> List[str]: '''simple docstring''' UpperCAmelCase = [(x[0], x[1], get_model(x[2] )) for x in logs] UpperCAmelCase = [x for x in logs if x[2] is not None] UpperCAmelCase = {x[2] for x in logs} UpperCAmelCase = {} 
for test in tests: UpperCAmelCase = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) UpperCAmelCase = counter.most_common() UpperCAmelCase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} UpperCAmelCase = sum(error_counts.values() ) if n_errors > 0: UpperCAmelCase = {'''count''': n_errors, '''errors''': error_counts} UpperCAmelCase = dict(sorted(r.items() , key=lambda UpperCamelCase__ : item[1]["count"] , reverse=UpperCamelCase__ ) ) return r def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> List[str]: '''simple docstring''' UpperCAmelCase = '''| no. | error | status |''' UpperCAmelCase = '''|-:|:-|:-|''' UpperCAmelCase = [header, sep] for error in reduced_by_error: UpperCAmelCase = reduced_by_error[error]['''count'''] UpperCAmelCase = F"""| {count} | {error[:100]} | |""" lines.append(UpperCamelCase__ ) return "\n".join(UpperCamelCase__ ) def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> str: '''simple docstring''' UpperCAmelCase = '''| model | no. 
of errors | major error | count |''' UpperCAmelCase = '''|-:|-:|-:|-:|''' UpperCAmelCase = [header, sep] for model in reduced_by_model: UpperCAmelCase = reduced_by_model[model]['''count'''] UpperCAmelCase , UpperCAmelCase = list(reduced_by_model[model]['''errors'''].items() )[0] UpperCAmelCase = F"""| {model} | {count} | {error[:60]} | {_count} |""" lines.append(UpperCamelCase__ ) return "\n".join(UpperCamelCase__ ) if __name__ == "__main__": __A : int = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") parser.add_argument( "--output_dir", type=str, required=True, help="Where to store the downloaded artifacts and other result files.", ) parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.") __A : Union[str, Any] = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) __A : Optional[int] = get_job_links(args.workflow_run_id, token=args.token) __A : Optional[Any] = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. 
if " / " in k: __A : List[Any] = k.find(" / ") __A : str = k[index + len(" / ") :] __A : Optional[int] = v with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) __A : List[str] = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) __A : Dict = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error __A : Tuple = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors __A : Optional[Any] = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) __A : int = reduce_by_error(errors) __A : List[Any] = reduce_by_model(errors) __A : int = make_github_table(reduced_by_error) __A : int = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp: fp.write(sa) with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp: fp.write(sa)
273
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure: maps submodule name -> public names it exports.
# Optional backends (tokenizers / torch / tf / flax) only register their
# entries when the backend is importable.
_import_structure = {
    "configuration_roformer": [
        "ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RoFormerConfig",
        "RoFormerOnnxConfig",
    ],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Real imports for static type checkers only; at runtime the module is lazy.
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy; attributes resolve on first access.
    # (Fix: was assigned to a throwaway variable, which left the module broken
    # and `_import_structure` undefined.)
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
273
1
import unittest

from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM


@require_torch
class TrOCRStandaloneDecoderModelTester:
    """Builds a small TrOCR decoder config and matching dummy inputs for the tests below.

    NOTE(review): class/attribute names restored from the upstream test file — the
    obfuscated source collapsed them all to a single name; confirm against upstream.
    """

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask, lm_labels) for a tiny decoder."""
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        """Check that cached (past_key_values) decoding matches full-sequence decoding."""
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    # NOTE(review): flag names restored from the upstream test file — confirm.
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # Intentionally skipped: not applicable to a standalone decoder.
    def test_inputs_embeds(self):
        pass

    def test_save_load_fast_init_from_base(self):
        pass

    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
273
import argparse

import torch

from transformers import YosoConfig, YosoForMaskedLM


def rename_key(orig_key):
    """Map a parameter name from the original YOSO checkpoint to its HF Transformers name.

    The replacements are order-sensitive: the more specific patterns (norm1/norm2,
    mha.attn, ff1/ff2) must run before their generic fallbacks (norm, mha, ff).
    """
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """Rename all keys in-place, drop pooler/classifier heads, and add derived tensors."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            # Fix: the renamed key must be written back, otherwise the
            # state dict loses every entry it pops.
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    # position ids are offset by 2 (pad + one reserved position)
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    """Load the original checkpoint, convert its keys, and save an HF model dir."""
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
273
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageCaptioningTool(PipelineTool):
    """Agent tool that captions an image with a BLIP vision-to-text model.

    `PipelineTool` drives the encode -> forward -> decode cycle; this class only
    supplies the model class and the three stage implementations.
    """

    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # The image processor needs the vision extra (PIL etc.).
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        """Turn a PIL image into model-ready pixel tensors."""
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        """Generate caption token ids."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode the first generated sequence to plain text."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
273
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int: '''simple docstring''' if exponent == 1: return base if exponent % 2 == 0: UpperCAmelCase = _modexpt(UpperCamelCase__ , exponent // 2 , UpperCamelCase__ ) % modulo_value return (x * x) % modulo_value else: return (base * _modexpt(UpperCamelCase__ , exponent - 1 , UpperCamelCase__ )) % modulo_value def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ = 1777 , UpperCamelCase__ = 1855 , UpperCamelCase__ = 8 ) -> int: '''simple docstring''' UpperCAmelCase = base for _ in range(1 , UpperCamelCase__ ): UpperCAmelCase = _modexpt(UpperCamelCase__ , UpperCamelCase__ , 10**digits ) return result if __name__ == "__main__": print(F'{solution() = }')
273
1
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    """Tokenize a raw text file line-by-line and pickle the resulting id sequences.

    Each line is wrapped with the tokenizer's BOS/SEP tokens, encoded without
    extra special tokens, and dumped as a list of numpy integer arrays.
    """
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter_count = 0  # renamed from `iter` to avoid shadowing the builtin
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter_count += 1
        if iter_count % interval == 0:
            end = time.time()
            logger.info(f"{iter_count} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # uint16 is enough when every token id fits in 16 bits; saves half the space.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
273
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging


if TYPE_CHECKING:
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase


logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}


class LongformerConfig(PretrainedConfig):
    """Configuration for a Longformer model.

    `attention_window` may be a single int (same window for every layer) or a
    list with one window size per layer.
    """

    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        """Constructs LongformerConfig."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for Longformer (adds the global attention mask)."""

    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        # Tell the model it is being exported so it uses ONNX-friendly code paths.
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        """What absolute tolerance the exported model must satisfy during validation."""
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
273
1
from collections import UserDict
from typing import Union

import numpy as np
import requests

from ..utils import (
    add_end_docstrings,
    logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline


logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """Zero-shot audio classification using a CLAP-style audio/text model.

    Scores a raw waveform against free-form candidate labels by comparing
    audio and text embeddings (``logits_per_audio``).
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        """Classify `audios` (ndarray, raw bytes, URL or local path) against candidate labels."""
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        # Fix: the template must be filled with each label, not with the template itself.
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        # Highest score first (fix: the sort key lambda referenced an undefined name).
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
273
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class KarrasVePipeline(DiffusionPipeline):
    """Unconditional image generation with the stochastic sampler of
    Karras et al. (2022), "Elucidating the Design Space of Diffusion-Based
    Generative Models" — eq. (213) governs the model input/output scaling.
    """

    # Declared for type checkers; populated by register_modules.
    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """Generate `batch_size` images; returns an ImagePipelineOutput (or tuple)."""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
273
1
def longest_common_subsequence(x: str, y: str):
    """Return (length, subsequence) of the longest common subsequence of x and y.

    Classic O(m*n) dynamic programming: table[i][j] holds the LCS length of
    x[:i] and y[:j]; the subsequence itself is rebuilt by backtracking.
    """
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    # backtrack from the bottom-right corner to recover one optimal subsequence
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
273
# NOTE(review): this module's identifiers were machine-mangled — most locals are
# collapsed onto the single name `UpperCAmelCase`, parameters onto `_A` (duplicate
# parameter names are a SyntaxError), and every method is named `_lowercase`
# (later defs shadow earlier ones). Comments below describe the apparent intent
# (a SpeechT5 feature-extractor test suite); confirm against the un-mangled
# upstream file before relying on them.
import itertools
import random
import unittest

import numpy as np

from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch

# Module-wide RNG; presumably bound as `global_rng` upstream — TODO confirm.
__A : str = random.Random()


def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__=1.0 , UpperCamelCase__=None , UpperCamelCase__=None ) -> Tuple:
    """Build a shape[0] x shape[1] nested list of random floats scaled by `scale`.

    NOTE(review): body reads `rng`/`shape`/`scale`/`values` that the mangled
    signature never binds — original params were presumably
    ``(shape, scale=1.0, rng=None, name=None)``.
    """
    if rng is None:
        UpperCAmelCase = global_rng
    UpperCAmelCase = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values


@require_torch
class A_ (unittest.TestCase ):
    """Holds the hyper-parameters and input builders used by the feature-extractor tests."""

    def __init__( self , _A , _A=7 , _A=4_0_0 , _A=2_0_0_0 , _A=1 , _A=0.0 , _A=1_6_0_0_0 , _A=True , _A=8_0 , _A=1_6 , _A=6_4 , _A="hann_window" , _A=8_0 , _A=7_6_0_0 , _A=1E-10 , _A=True , ):
        """Store tester configuration; attribute names below reveal the intended parameters."""
        UpperCAmelCase = parent
        UpperCAmelCase = batch_size
        UpperCAmelCase = min_seq_length
        UpperCAmelCase = max_seq_length
        # Step between consecutive example lengths so the batch spans min..max.
        UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        UpperCAmelCase = feature_size
        UpperCAmelCase = padding_value
        UpperCAmelCase = sampling_rate
        UpperCAmelCase = do_normalize
        UpperCAmelCase = num_mel_bins
        UpperCAmelCase = hop_length
        UpperCAmelCase = win_length
        UpperCAmelCase = win_function
        UpperCAmelCase = fmin
        UpperCAmelCase = fmax
        UpperCAmelCase = mel_floor
        UpperCAmelCase = return_attention_mask

    def _lowercase ( self ):
        """Return the kwargs dict used to construct the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def _lowercase ( self , _A=False , _A=False ):
        """Build raw-waveform inputs (equal or increasing lengths); optionally as numpy arrays."""
        def _flatten(_A ):
            return list(itertools.chain(*_A ) )

        if equal_length:
            UpperCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            UpperCAmelCase = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            UpperCAmelCase = [np.asarray(_A ) for x in speech_inputs]
        return speech_inputs

    def _lowercase ( self , _A=False , _A=False ):
        """Build mel-spectrogram target inputs (equal or increasing lengths); optionally numpy."""
        if equal_length:
            UpperCAmelCase = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            UpperCAmelCase = [
                floats_list((x, self.num_mel_bins) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            UpperCAmelCase = [np.asarray(_A ) for x in speech_inputs]
        return speech_inputs


@require_torch
class A_ (a_ , unittest.TestCase ):
    """Feature-extractor test suite; mixes in the common sequence-feature-extraction tests."""

    # Class under test.
    UpperCAmelCase__ = SpeechTaFeatureExtractor

    def _lowercase ( self ):
        """setUp: create the shared tester helper."""
        UpperCAmelCase = SpeechTaFeatureExtractionTester(self )

    def _lowercase ( self , _A ):
        """Assert per-feature mean ~ 0 and variance ~ 1 (zero-mean unit-variance check)."""
        self.assertTrue(np.all(np.mean(_A , axis=0 ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(_A , axis=0 ) - 1 ) < 1E-3 ) )

    def _lowercase ( self ):
        """Check list vs numpy inputs produce matching features, unbatched and batched."""
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        UpperCAmelCase = [np.asarray(_A ) for speech_input in speech_inputs]

        # Test not batched input
        UpperCAmelCase = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
        UpperCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )

        # Test batched
        UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values
        UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values
        # NOTE(review): both loop targets are mangled to the same name.
        for enc_seq_a, enc_seq_a in zip(_A , _A ):
            self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )

    def _lowercase ( self ):
        """Check normalization + zero padding for each padding strategy (np tensors)."""
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
        UpperCAmelCase = [None, 1_6_0_0, None]
        for max_length, padding in zip(_A , _A ):
            UpperCAmelCase = feat_extract(_A , padding=_A , max_length=_A , return_tensors='''np''' )
            UpperCAmelCase = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
            self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
            # NOTE(review): indexes input_values[0] again — presumably [1] was intended.
            self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )

    def _lowercase ( self ):
        """Same normalization check without return_tensors (python lists)."""
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase = range(8_0_0 , 1_4_0_0 , 2_0_0 )
        UpperCAmelCase = [floats_list((1, x) )[0] for x in lengths]
        UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
        UpperCAmelCase = [None, 1_6_0_0, None]
        for max_length, padding in zip(_A , _A ):
            UpperCAmelCase = feat_extract(_A , max_length=_A , padding=_A )
            UpperCAmelCase = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
            self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
            self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )

    def _lowercase ( self ):
        """Truncation to max_length with padding='max_length'."""
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        UpperCAmelCase = feat_extract(
            _A , truncation=_A , max_length=1_0_0_0 , padding='''max_length''' , return_tensors='''np''' )
        UpperCAmelCase = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )

    def _lowercase ( self ):
        """Truncation/padding interplay with padding='longest'."""
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        UpperCAmelCase = feat_extract(
            _A , truncation=_A , max_length=1_0_0_0 , padding='''longest''' , return_tensors='''np''' )
        UpperCAmelCase = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1_0_0_0) )

        UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        UpperCAmelCase = feat_extract(
            _A , truncation=_A , max_length=2_0_0_0 , padding='''longest''' , return_tensors='''np''' )
        UpperCAmelCase = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1_2_0_0) )

    def _lowercase ( self ):
        """double-precision python/numpy input should come out as float32 for np and pt."""
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # `floataa` is the mangled spelling of a concrete numpy dtype — TODO confirm (likely float64 here).
        UpperCAmelCase = np.random.rand(1_0_0 ).astype(np.floataa )
        UpperCAmelCase = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            UpperCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            UpperCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )

    def _lowercase ( self ):
        """Target (mel-spectrogram) extraction: feature size, unbatched/batched equivalence."""
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        UpperCAmelCase = [np.asarray(_A ) for speech_input in speech_inputs]

        # Test feature size
        UpperCAmelCase = feature_extractor(audio_target=_A , padding=_A , return_tensors='''np''' ).input_values
        self.assertTrue(input_values.ndim == 3 )
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )

        # Test not batched input
        UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values
        UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )

        # Test batched
        UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
        UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(_A , _A ):
            self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )

        # Test 2-D numpy arrays are batched.
        UpperCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
        UpperCAmelCase = np.asarray(_A )
        UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
        UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(_A , _A ):
            self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )

    def _lowercase ( self ):
        """BatchFeature on target inputs preserves lengths and converts to np with expected shape."""
        UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
        UpperCAmelCase = feat_extract.model_input_names[0]
        UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(_A ) == len(_A ) for x, y in zip(_A , processed_features[input_name] ) ) )

        UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_A )
        UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
        UpperCAmelCase = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            UpperCAmelCase = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )

    @require_torch
    def _lowercase ( self ):
        """Same BatchFeature shape check with tensor_type='pt'."""
        UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_A )
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
        UpperCAmelCase = feat_extract.model_input_names[0]
        UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
        UpperCAmelCase = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            UpperCAmelCase = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )

    @require_torch
    def _lowercase ( self ):
        """Padding to 'longest' gives numerically equivalent np and pt tensors."""
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
        UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
        UpperCAmelCase = feat_extract.model_input_names[0]
        UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
        UpperCAmelCase = feat_extract.num_mel_bins  # hack!
        UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )[input_name]
        UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''pt''' )[input_name]
        self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )

    def _lowercase ( self ):
        """attention_mask is returned and its per-row sums equal the input lengths."""
        UpperCAmelCase = self.feat_extract_dict
        UpperCAmelCase = True
        UpperCAmelCase = self.feature_extraction_class(**_A )
        UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
        UpperCAmelCase = [len(_A ) for x in speech_inputs]
        UpperCAmelCase = feat_extract.model_input_names[0]
        UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
        UpperCAmelCase = feat_extract.num_mel_bins  # hack!
        UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )
        self.assertIn('''attention_mask''' , _A )
        self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
        self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _A )

    def _lowercase ( self ):
        """attention_mask with truncation to the shortest input (max_length padding)."""
        UpperCAmelCase = self.feat_extract_dict
        UpperCAmelCase = True
        UpperCAmelCase = self.feature_extraction_class(**_A )
        UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
        UpperCAmelCase = [len(_A ) for x in speech_inputs]
        UpperCAmelCase = feat_extract.model_input_names[0]
        UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
        UpperCAmelCase = min(_A )
        UpperCAmelCase = feat_extract.num_mel_bins  # hack!
        UpperCAmelCase = feat_extract.pad(
            _A , padding='''max_length''' , max_length=_A , truncation=_A , return_tensors='''np''' )
        self.assertIn('''attention_mask''' , _A )
        self.assertListEqual(
            list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )

    def _lowercase ( self , _A ):
        """Load `num_samples` waveform arrays from the dummy LibriSpeech dataset."""
        from datasets import load_dataset

        UpperCAmelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        UpperCAmelCase = ds.sort('''id''' ).select(range(_A ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]

    def _lowercase ( self ):
        """Integration: waveform extraction matches recorded reference values."""
        UpperCAmelCase = torch.tensor(
            [2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03, 3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03, 2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04, 4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03, 7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04, 4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] )
        # fmt: on
        UpperCAmelCase = self._load_datasamples(1 )
        UpperCAmelCase = SpeechTaFeatureExtractor()
        UpperCAmelCase = feature_extractor(_A , return_tensors='''pt''' ).input_values
        # NOTE(review): assertEquals is the deprecated alias of assertEqual.
        self.assertEquals(input_values.shape , (1, 9_3_6_8_0) )
        self.assertTrue(torch.allclose(input_values[0, :3_0] , _A , atol=1E-6 ) )

    def _lowercase ( self ):
        """Integration: mel-target extraction matches recorded reference values."""
        UpperCAmelCase = torch.tensor(
            [-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77, -3.15_20, -2.94_35, -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86, -3.08_64, -3.12_91, -3.23_53, -2.74_44, -2.68_31, -2.72_87, -3.17_61, -3.15_71, -3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] )
        # fmt: on
        UpperCAmelCase = self._load_datasamples(1 )
        UpperCAmelCase = SpeechTaFeatureExtractor()
        UpperCAmelCase = feature_extractor(audio_target=_A , return_tensors='''pt''' ).input_values
        self.assertEquals(input_values.shape , (1, 3_6_6, 8_0) )
        self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , _A , atol=1E-4 ) )
273
1
# NOTE(review): identifiers in this test module were machine-mangled — locals are
# collapsed onto `UpperCAmelCase`, parameters onto `_A`, and every method is named
# `_lowercase` (later defs shadow earlier ones). Bodies read names the mangled
# signatures never bind (`config`, `kwargs`, `scheduler`, ...). Comments describe
# the apparent intent (the diffusers DEISMultistepScheduler test suite); confirm
# against the un-mangled upstream file.
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class A_ (a_ ):
    """Test suite for DEISMultistepScheduler, built on the shared scheduler test base."""

    # Scheduler classes exercised and default forward kwargs.
    UpperCAmelCase__ = (DEISMultistepScheduler,)
    UpperCAmelCase__ = (('''num_inference_steps''', 2_5),)

    def _lowercase ( self , **_A ):
        """Return the default scheduler config, updated with any overrides."""
        UpperCAmelCase = {
            '''num_train_timesteps''': 1_0_0_0,
            '''beta_start''': 0.00_01,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''solver_order''': 2,
        }
        config.update(**_A )
        return config

    def _lowercase ( self , _A=0 , **_A ):
        """check_over_configs: save/reload a configured scheduler and compare step outputs."""
        UpperCAmelCase = dict(self.forward_default_kwargs )
        UpperCAmelCase = kwargs.pop('''num_inference_steps''' , _A )
        UpperCAmelCase = self.dummy_sample
        UpperCAmelCase = 0.1 * sample
        UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            UpperCAmelCase = self.get_scheduler_config(**_A )
            UpperCAmelCase = scheduler_class(**_A )
            scheduler.set_timesteps(_A )
            # copy over dummy past residuals
            UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(_A )
                UpperCAmelCase = scheduler_class.from_pretrained(_A )
                new_scheduler.set_timesteps(_A )
                # copy over dummy past residuals
                UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
            UpperCAmelCase , UpperCAmelCase = sample, sample
            for t in range(_A , time_step + scheduler.config.solver_order + 1 ):
                UpperCAmelCase = scheduler.step(_A , _A , _A , **_A ).prev_sample
                UpperCAmelCase = new_scheduler.step(_A , _A , _A , **_A ).prev_sample
                assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def _lowercase ( self ):
        """Intentionally disabled base-class test (no-op override)."""
        pass

    def _lowercase ( self , _A=0 , **_A ):
        """check_over_forward: save/reload with default config and compare a single step."""
        UpperCAmelCase = dict(self.forward_default_kwargs )
        UpperCAmelCase = kwargs.pop('''num_inference_steps''' , _A )
        UpperCAmelCase = self.dummy_sample
        UpperCAmelCase = 0.1 * sample
        UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            UpperCAmelCase = self.get_scheduler_config()
            UpperCAmelCase = scheduler_class(**_A )
            scheduler.set_timesteps(_A )
            # copy over dummy past residuals (must be after setting timesteps)
            UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(_A )
                UpperCAmelCase = scheduler_class.from_pretrained(_A )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(_A )
                # copy over dummy past residual (must be after setting timesteps)
                UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
            UpperCAmelCase = scheduler.step(_A , _A , _A , **_A ).prev_sample
            UpperCAmelCase = new_scheduler.step(_A , _A , _A , **_A ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def _lowercase ( self , _A=None , **_A ):
        """full_loop: run 10 denoising steps with the dummy model and return the final sample."""
        if scheduler is None:
            UpperCAmelCase = self.scheduler_classes[0]
            UpperCAmelCase = self.get_scheduler_config(**_A )
            UpperCAmelCase = scheduler_class(**_A )
        UpperCAmelCase = self.scheduler_classes[0]
        UpperCAmelCase = self.get_scheduler_config(**_A )
        UpperCAmelCase = scheduler_class(**_A )
        UpperCAmelCase = 1_0
        UpperCAmelCase = self.dummy_model()
        UpperCAmelCase = self.dummy_sample_deter
        scheduler.set_timesteps(_A )
        for i, t in enumerate(scheduler.timesteps ):
            UpperCAmelCase = model(_A , _A )
            UpperCAmelCase = scheduler.step(_A , _A , _A ).prev_sample
        return sample

    def _lowercase ( self ):
        """Outputs at two consecutive timesteps keep the input sample's shape."""
        UpperCAmelCase = dict(self.forward_default_kwargs )
        UpperCAmelCase = kwargs.pop('''num_inference_steps''' , _A )
        for scheduler_class in self.scheduler_classes:
            UpperCAmelCase = self.get_scheduler_config()
            UpperCAmelCase = scheduler_class(**_A )
            UpperCAmelCase = self.dummy_sample
            UpperCAmelCase = 0.1 * sample
            if num_inference_steps is not None and hasattr(_A , '''set_timesteps''' ):
                scheduler.set_timesteps(_A )
            elif num_inference_steps is not None and not hasattr(_A , '''set_timesteps''' ):
                UpperCAmelCase = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
            UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
            UpperCAmelCase = scheduler.timesteps[5]
            UpperCAmelCase = scheduler.timesteps[6]
            UpperCAmelCase = scheduler.step(_A , _A , _A , **_A ).prev_sample
            UpperCAmelCase = scheduler.step(_A , _A , _A , **_A ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def _lowercase ( self ):
        """Round-tripping the config through other multistep schedulers keeps results stable."""
        UpperCAmelCase = DEISMultistepScheduler(**self.get_scheduler_config() )
        UpperCAmelCase = self.full_loop(scheduler=_A )
        UpperCAmelCase = torch.mean(torch.abs(_A ) )
        assert abs(result_mean.item() - 0.2_39_16 ) < 1E-3
        UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
        UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
        UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
        UpperCAmelCase = self.full_loop(scheduler=_A )
        UpperCAmelCase = torch.mean(torch.abs(_A ) )
        assert abs(result_mean.item() - 0.2_39_16 ) < 1E-3

    def _lowercase ( self ):
        """Sweep num_train_timesteps."""
        for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=_A )

    def _lowercase ( self ):
        """Sweep thresholding-related config combinations."""
        self.check_over_configs(thresholding=_A )
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=_A ,
                            prediction_type=_A ,
                            sample_max_value=_A ,
                            algorithm_type='''deis''' ,
                            solver_order=_A ,
                            solver_type=_A ,
                        )

    def _lowercase ( self ):
        """Sweep prediction_type."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=_A )

    def _lowercase ( self ):
        """Sweep solver order/type/prediction combinations and check samples are NaN-free."""
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=_A ,
                            solver_type=_A ,
                            prediction_type=_A ,
                            algorithm_type=_A ,
                        )
                        UpperCAmelCase = self.full_loop(
                            solver_order=_A ,
                            solver_type=_A ,
                            prediction_type=_A ,
                            algorithm_type=_A ,
                        )
                        assert not torch.isnan(_A ).any(), "Samples have nan numbers"

    def _lowercase ( self ):
        """Sweep lower_order_final (presumably True and False upstream)."""
        self.check_over_configs(lower_order_final=_A )
        self.check_over_configs(lower_order_final=_A )

    def _lowercase ( self ):
        """Sweep num_inference_steps at time_step=0."""
        for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_forward(num_inference_steps=_A , time_step=0 )

    def _lowercase ( self ):
        """Regression value for the default full loop."""
        UpperCAmelCase = self.full_loop()
        UpperCAmelCase = torch.mean(torch.abs(_A ) )
        assert abs(result_mean.item() - 0.2_39_16 ) < 1E-3

    def _lowercase ( self ):
        """Regression value for the v-prediction full loop."""
        UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' )
        UpperCAmelCase = torch.mean(torch.abs(_A ) )
        assert abs(result_mean.item() - 0.0_91 ) < 1E-3

    def _lowercase ( self ):
        """Half-precision input keeps its dtype through the scheduler loop."""
        UpperCAmelCase = self.scheduler_classes[0]
        UpperCAmelCase = self.get_scheduler_config(thresholding=_A , dynamic_thresholding_ratio=0 )
        UpperCAmelCase = scheduler_class(**_A )
        UpperCAmelCase = 1_0
        UpperCAmelCase = self.dummy_model()
        UpperCAmelCase = self.dummy_sample_deter.half()
        scheduler.set_timesteps(_A )
        for i, t in enumerate(scheduler.timesteps ):
            UpperCAmelCase = model(_A , _A )
            UpperCAmelCase = scheduler.step(_A , _A , _A ).prev_sample
        # `floataa` is the mangled spelling of the expected torch dtype (presumably float16) — TODO confirm.
        assert sample.dtype == torch.floataa
273
# Lazy-module __init__ for the speech_to_text model family: declares the import
# structure up front and defers heavy imports (torch/TF/sentencepiece-dependent
# modules) until attribute access via _LazyModule.
#
# NOTE(review): identifiers were machine-mangled. Every import-structure bucket
# is assigned to the same module-level name `__A` (upstream this is a single
# `_import_structure` dict being extended), and the final _LazyModule call reads
# `_import_structure`, which is never bound under that name here — confirm
# against the un-mangled upstream file.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


# Base import structure: config + processor are always importable.
__A : Union[str, Any] = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

# Tokenizer requires sentencepiece.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : List[Any] = ["Speech2TextTokenizer"]

# Feature extractor requires the speech extras (torchaudio etc.).
try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : List[Any] = ["Speech2TextFeatureExtractor"]

# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : int = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : Tuple = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static imports for type checkers only; mirrors the structure above.
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
    from .processing_speech_to_text import SpeechaTextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import SpeechaTextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeechaTextForConditionalGeneration,
            TFSpeechaTextModel,
            TFSpeechaTextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechaTextForConditionalGeneration,
            SpeechaTextModel,
            SpeechaTextPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    __A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
273
1
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """
    Approximate the largest-magnitude eigenvalue and its eigenvector of
    ``input_matrix`` by power iteration, starting from ``vector``.

    Fixes vs. the mangled original: all locals had been collapsed onto one
    identifier, so the loop state (``convergence``, ``lambda_``, ``iterations``,
    ``error``) was never actually bound under the names the body reads.

    Args:
        input_matrix: square matrix; must be Hermitian when complex.
        vector: initial guess, same leading dimension as the matrix.
        error_tol: relative-change tolerance used to declare convergence.
        max_iterations: hard cap on iterations.

    Returns:
        ``(eigenvalue, eigenvector)``; for complex Hermitian input the
        eigenvalue is returned as its real part.
    """
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Iterate until the relative change in the Rayleigh quotient is tiny
    # or we exceed max_iterations.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Rayleigh quotient (fast because the vector is already normalized).
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    """Compare power_iteration against numpy.linalg.eigh on real and Hermitian inputs."""
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation (eigh is for symmetric/Hermitian matrices);
        # eigh returns eigenvalues in ascending order, so the last is the max.
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        eigen_value_max = eigen_values[-1]
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Eigenvectors are unique only up to sign, so compare magnitudes.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
273
def hexagonal_numbers(length: int) -> list[int]:
    """
    Return the first ``length`` hexagonal numbers, h(n) = n * (2n - 1)
    for n = 0 .. length - 1.

    Fixes vs. the mangled original: the function was defined under the name
    ``__SCREAMING_SNAKE_CASE`` while the ``__main__`` block called
    ``hexagonal_numbers`` (NameError), and the validation tested
    ``length <= 0`` before ``isinstance``, so non-numeric input raised
    TypeError instead of the intended ValueError.

    >>> hexagonal_numbers(5)
    [0, 1, 6, 15, 28]

    Raises:
        ValueError: if ``length`` is not a positive integer.
    """
    # Check the type first so non-int input reaches the intended ValueError
    # instead of failing inside the `<=` comparison.
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
273
1
from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __A : Optional[Any] = logging.get_logger(__name__) __A : str = { "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json", "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json", "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json", "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json", "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json", "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json", "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json", "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json", "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json", "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json", "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json", "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json", } class A_ (a_ ): UpperCAmelCase__ = '''codegen''' UpperCAmelCase__ = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self , _A=5_0_4_0_0 , _A=2_0_4_8 , _A=2_0_4_8 , _A=4_0_9_6 , _A=2_8 , _A=1_6 , _A=6_4 , 
_A=None , _A="gelu_new" , _A=0.0 , _A=0.0 , _A=0.0 , _A=1E-5 , _A=0.02 , _A=True , _A=5_0_2_5_6 , _A=5_0_2_5_6 , _A=False , **_A , ): '''simple docstring''' UpperCAmelCase = vocab_size UpperCAmelCase = n_ctx UpperCAmelCase = n_positions UpperCAmelCase = n_embd UpperCAmelCase = n_layer UpperCAmelCase = n_head UpperCAmelCase = n_inner UpperCAmelCase = rotary_dim UpperCAmelCase = activation_function UpperCAmelCase = resid_pdrop UpperCAmelCase = embd_pdrop UpperCAmelCase = attn_pdrop UpperCAmelCase = layer_norm_epsilon UpperCAmelCase = initializer_range UpperCAmelCase = use_cache UpperCAmelCase = bos_token_id UpperCAmelCase = eos_token_id super().__init__( bos_token_id=_A , eos_token_id=_A , tie_word_embeddings=_A , **_A ) class A_ (a_ ): def __init__( self , _A , _A = "default" , _A = None , _A = False , ): '''simple docstring''' super().__init__(_A , task=_A , patching_specs=_A , use_past=_A ) if not getattr(self._config , '''pad_token_id''' , _A ): # TODO: how to do that better? UpperCAmelCase = 0 @property def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(_A , direction='''inputs''' ) UpperCAmelCase = {0: '''batch''', 1: '''past_sequence + sequence'''} else: UpperCAmelCase = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def _lowercase ( self ): '''simple docstring''' return self._config.n_layer @property def _lowercase ( self ): '''simple docstring''' return self._config.n_head def _lowercase ( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ): '''simple docstring''' UpperCAmelCase = super(_A , self ).generate_dummy_inputs( _A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A ) # We need to order the input in the way they appears in the forward() UpperCAmelCase = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not 
is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch UpperCAmelCase , UpperCAmelCase = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values UpperCAmelCase = seqlen + 2 UpperCAmelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) UpperCAmelCase = [ (torch.zeros(_A ), torch.zeros(_A )) for _ in range(self.num_layers ) ] UpperCAmelCase = common_inputs['''attention_mask'''] if self.use_past: UpperCAmelCase = ordered_inputs['''attention_mask'''].dtype UpperCAmelCase = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(_A , _A , dtype=_A )] , dim=1 ) return ordered_inputs @property def _lowercase ( self ): '''simple docstring''' return 1_3
273
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , ) @pytest.mark.usefixtures('''sm_env''' ) @parameterized_class( [ { '''framework''': '''pytorch''', '''script''': '''run_glue.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.g4dn.xlarge''', '''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9}, }, { '''framework''': '''tensorflow''', '''script''': '''run_tf.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.g4dn.xlarge''', '''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9}, }, ] ) class A_ (unittest.TestCase ): def _lowercase ( self ): '''simple docstring''' if self.framework == "pytorch": subprocess.run( F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , ) assert hasattr(self , '''env''' ) def _lowercase ( self , _A=1 ): '''simple docstring''' return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , ) def _lowercase ( self , _A ): '''simple docstring''' TrainingJobAnalytics(_A 
).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.create_estimator() # run training estimator.fit() # result dataframe UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping UpperCAmelCase = ( Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert all(t <= self.results['''eval_loss'''] for t in eval_loss ) # dump tests result into json file to share in PR with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
273
1
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer __A : Any = logging.get_logger(__name__) __A : List[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __A : List[str] = { "vocab_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } __A : Union[str, Any] = { "vocab_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } __A : Tuple = { "vocab_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-reader-single-nq-base": ( 
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json" ), }, } __A : Dict = { "facebook/dpr-ctx_encoder-single-nq-base": 512, "facebook/dpr-ctx_encoder-multiset-base": 512, } __A : Tuple = { "facebook/dpr-question_encoder-single-nq-base": 512, "facebook/dpr-question_encoder-multiset-base": 512, } __A : str = { "facebook/dpr-reader-single-nq-base": 512, "facebook/dpr-reader-multiset-base": 512, } __A : Dict = { "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True}, } __A : str = { "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True}, } __A : List[Any] = { "facebook/dpr-reader-single-nq-base": {"do_lower_case": True}, "facebook/dpr-reader-multiset-base": {"do_lower_case": True}, } class A_ (a_ ): UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class A_ (a_ ): UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION __A : Union[str, Any] = collections.namedtuple( "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"] ) __A : List[Any] = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"]) __A : int = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title 
and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. 
This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n " @add_start_docstrings(a_ ) class A_ : def __call__( self , _A , _A = None , _A = None , _A = False , _A = False , _A = None , _A = None , _A = None , **_A , ): '''simple docstring''' if titles is None and texts is None: return super().__call__( _A , padding=_A , truncation=_A , max_length=_A , return_tensors=_A , return_attention_mask=_A , **_A , ) elif titles is None or texts is None: UpperCAmelCase = titles if texts is None else texts return super().__call__( _A , _A , padding=_A , truncation=_A , max_length=_A , return_tensors=_A , return_attention_mask=_A , **_A , ) UpperCAmelCase = titles if not isinstance(_A , _A ) else [titles] UpperCAmelCase = texts if not isinstance(_A , _A ) else [texts] UpperCAmelCase = len(_A ) UpperCAmelCase = questions if not isinstance(_A , _A ) else [questions] * n_passages if len(_A ) != len(_A ): raise ValueError( F"""There should be as many titles than texts but got {len(_A )} titles and {len(_A )} texts.""" ) UpperCAmelCase = super().__call__(_A , _A , padding=_A , truncation=_A )['''input_ids'''] UpperCAmelCase = super().__call__(_A , add_special_tokens=_A , padding=_A , truncation=_A )['''input_ids'''] UpperCAmelCase = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(_A , _A ) ] } if return_attention_mask is not False: UpperCAmelCase = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != 
self.pad_token_id ) for input_id in input_ids] ) UpperCAmelCase = attention_mask return self.pad(_A , padding=_A , max_length=_A , return_tensors=_A ) def _lowercase ( self , _A , _A , _A = 1_6 , _A = 6_4 , _A = 4 , ): '''simple docstring''' UpperCAmelCase = reader_input['''input_ids'''] UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = reader_output[:3] UpperCAmelCase = len(_A ) UpperCAmelCase = sorted(range(_A ) , reverse=_A , key=relevance_logits.__getitem__ ) UpperCAmelCase = [] for doc_id in sorted_docs: UpperCAmelCase = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence UpperCAmelCase = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: UpperCAmelCase = sequence_ids.index(self.pad_token_id ) else: UpperCAmelCase = len(_A ) UpperCAmelCase = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_A , top_spans=_A , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_A , start_index=_A , end_index=_A , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(_A ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _lowercase ( self , _A , _A , _A , _A , ): '''simple docstring''' UpperCAmelCase = [] for start_index, start_score in enumerate(_A ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) UpperCAmelCase = sorted(_A , key=lambda _A : x[1] , reverse=_A ) UpperCAmelCase = [] for (start_index, end_index), score in scores: if start_index > end_index: 
raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" ) UpperCAmelCase = end_index - start_index + 1 if length > max_answer_length: raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(_A ) == top_spans: break return chosen_span_intervals @add_end_docstrings(a_ ) class A_ (a_ , a_ ): UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = READER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
273
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A : int = logging.get_logger(__name__) __A : Tuple = { "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json", "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json", "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json", # See all BigBird models at https://huggingface.co/models?filter=big_bird } class A_ (a_ ): UpperCAmelCase__ = '''big_bird''' def __init__( self , _A=5_0_3_5_8 , _A=7_6_8 , _A=1_2 , _A=1_2 , _A=3_0_7_2 , _A="gelu_new" , _A=0.1 , _A=0.1 , _A=4_0_9_6 , _A=2 , _A=0.02 , _A=1E-12 , _A=True , _A=0 , _A=1 , _A=2 , _A=6_6 , _A="block_sparse" , _A=True , _A=False , _A=6_4 , _A=3 , _A=None , **_A , ): '''simple docstring''' super().__init__( pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , sep_token_id=_A , **_A , ) UpperCAmelCase = vocab_size UpperCAmelCase = max_position_embeddings UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = initializer_range UpperCAmelCase = type_vocab_size UpperCAmelCase = layer_norm_eps UpperCAmelCase = use_cache UpperCAmelCase = rescale_embeddings UpperCAmelCase = attention_type UpperCAmelCase = use_bias UpperCAmelCase = block_size UpperCAmelCase = num_random_blocks UpperCAmelCase = classifier_dropout class A_ (a_ ): @property def _lowercase ( self ): '''simple docstring''' if self.task == "multiple-choice": UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: UpperCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ 
('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
273
1
import os import unittest from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class A_ (a_ , unittest.TestCase ): UpperCAmelCase__ = TransfoXLTokenizer UpperCAmelCase__ = False UpperCAmelCase__ = False def _lowercase ( self ): '''simple docstring''' super().setUp() UpperCAmelCase = [ '''<unk>''', '''[CLS]''', '''[SEP]''', '''want''', '''unwanted''', '''wa''', '''un''', '''running''', ''',''', '''low''', '''l''', ] UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def _lowercase ( self , **_A ): '''simple docstring''' UpperCAmelCase = True return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **_A ) def _lowercase ( self , _A ): '''simple docstring''' UpperCAmelCase = '''<unk> UNwanted , running''' UpperCAmelCase = '''<unk> unwanted, running''' return input_text, output_text def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=_A ) UpperCAmelCase = tokenizer.tokenize('''<unk> UNwanted , running''' ) self.assertListEqual(_A , ['''<unk>''', '''unwanted''', ''',''', '''running'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [0, 4, 8, 7] ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = TransfoXLTokenizer(lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = TransfoXLTokenizer(lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? 
''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = TransfoXLTokenizer(lower_case=_A ) UpperCAmelCase = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?''' UpperCAmelCase = [ '''Hello''', '''(''', '''bracket''', ''')''', '''and''', '''side''', '''@-@''', '''scrolled''', '''[''', '''and''', ''']''', '''Henry''', '''\'s''', '''$''', '''5''', '''@,@''', '''000''', '''with''', '''3''', '''@.@''', '''34''', '''m''', '''.''', '''What''', '''\'s''', '''up''', '''!''', '''?''', ] self.assertListEqual(tokenizer.tokenize(_A ) , _A ) self.assertEqual(tokenizer.convert_tokens_to_string(_A ) , _A ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = len(_A ) tokenizer.add_tokens(['''new1''', '''new2'''] ) tokenizer.move_added_token('''new1''' , 1 ) # Check that moved token is not copied (duplicate) self.assertEqual(len(_A ) , original_len + 2 ) # Check that token is moved to specified id self.assertEqual(tokenizer.encode('''new1''' ) , [1] ) self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
273
from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class A_ : def __init__( self , _A , _A=1_3 , _A=3_0 , _A=2 , _A=3 , _A=True , _A=True , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1_0 , _A=0.02 , _A=3 , _A=None , ): '''simple docstring''' UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = image_size UpperCAmelCase = patch_size UpperCAmelCase = num_channels UpperCAmelCase = is_training UpperCAmelCase = use_labels UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = type_sequence_label_size UpperCAmelCase = initializer_range UpperCAmelCase = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase = (image_size // patch_size) ** 2 UpperCAmelCase = num_patches + 1 def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase = None if self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase = self.get_config() return config, pixel_values, labels def _lowercase ( self ): '''simple docstring''' return ViTConfig( 
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , ) def _lowercase ( self , _A , _A , _A ): '''simple docstring''' UpperCAmelCase = TFViTModel(config=_A ) UpperCAmelCase = model(_A , training=_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. UpperCAmelCase = self.image_size // 2 UpperCAmelCase = pixel_values[:, :, :image_size, :image_size] UpperCAmelCase = model(_A , interpolate_pos_encoding=_A , training=_A ) UpperCAmelCase = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def _lowercase ( self , _A , _A , _A ): '''simple docstring''' UpperCAmelCase = self.type_sequence_label_size UpperCAmelCase = TFViTForImageClassification(_A ) UpperCAmelCase = model(_A , labels=_A , training=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. 
UpperCAmelCase = self.image_size // 2 UpperCAmelCase = pixel_values[:, :, :image_size, :image_size] UpperCAmelCase = model(_A , interpolate_pos_encoding=_A , training=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase = 1 UpperCAmelCase = TFViTForImageClassification(_A ) UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs UpperCAmelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class A_ (a_ , a_ , unittest.TestCase ): UpperCAmelCase__ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () UpperCAmelCase__ = ( {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification} if is_tf_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = TFViTModelTester(self ) UpperCAmelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 ) def _lowercase ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def _lowercase ( self ): '''simple docstring''' pass @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def _lowercase ( self ): '''simple docstring''' pass def _lowercase ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase = model_class(_A ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) UpperCAmelCase = 
model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase = model_class(_A ) UpperCAmelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase = [*signature.parameters.keys()] UpperCAmelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _A ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) @slow def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(_A ) def __SCREAMING_SNAKE_CASE ( ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class A_ (unittest.TestCase ): @cached_property def _lowercase ( self ): '''simple docstring''' return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None @slow def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ) UpperCAmelCase = self.default_image_processor UpperCAmelCase = prepare_img() UpperCAmelCase = image_processor(images=_A , return_tensors='''tf''' ) # forward pass UpperCAmelCase = model(**_A ) # verify the logits UpperCAmelCase = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , _A ) UpperCAmelCase = tf.constant([-0.27_44, 0.82_15, -0.08_36] ) 
tf.debugging.assert_near(outputs.logits[0, :3] , _A , atol=1E-4 )
273
1
# Package entry point for the Shap-E pipelines (text-to-3D and image-to-3D).
# If either `transformers` or `torch` is missing, only the dummy `ShapEPipeline`
# placeholder is exported so that importing this package still succeeds and
# raises a helpful error only on use.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,  # NOTE(review): imported but unused here — confirm before removing
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Soft dependencies missing: fall back to the dummy object.
    from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
    # Real implementations: cameras, both pipelines, and the NeRF renderer stack.
    from .camera import create_pan_cameras
    from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
    from .renderer import (
        BoundingBoxVolume,
        ImportanceRaySampler,
        MLPNeRFModelOutput,
        MLPNeRSTFModel,
        ShapEParamsProjModel,
        ShapERenderer,
        StratifiedRaySampler,
        VoidNeRFModel,
    )
273
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    """Slow integration tests comparing XLM-R hidden states against fairseq reference values."""

    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        # "The dog is cute and lives in the garden house" (sentencepiece token ids)
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # Reference produced with:
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base'); xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        # "The dog is cute and lives in the garden house" (sentencepiece token ids)
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # Reference produced with:
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large'); xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
273
1
import json
from typing import Dict, List, Optional, Tuple, Union

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}


class LEDTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) LED tokenizer; BPE-based, derived from the BART fast tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Sync the backend pre-tokenizer's `add_prefix_space` with the requested value.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        """The mask token; logs an error and returns None when unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Make the mask token behave like a normal word: include preceding space.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """<s> A </s> (</s> B </s>) — BART-style special-token layout."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """LED does not use token type ids — always return a zero mask of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """Pad as usual, then extend `global_attention_mask` to the padded length."""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
273
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union

import datasets
import numpy as np
import torch
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Validate that any provided data files are csv or json.
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that dynamically pads the inputs for multiple choice received.

    Flattens (batch, num_choices, seq) features so the tokenizer can pad them,
    then reshapes back and re-attaches the labels.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch


def main():
    # See all possible arguments in src/transformers/training_args.py, or by
    # passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer — the .from_pretrained methods guarantee
    # that only one local process can concurrently download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        # Repeat each context once per choice and pair it with "question + ending".
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten back to groups of 4 choices per example.
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
273
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


# Base import structure; optional entries are appended below depending on which
# soft dependencies (sentencepiece / torchaudio / tf / torch) are installed.
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Real imports for static type checkers only; at runtime the lazy module below is used.
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
273
from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class A_ : UpperCAmelCase__ = MBartConfig UpperCAmelCase__ = {} UpperCAmelCase__ = '''gelu''' def __init__( self , _A , _A=1_3 , _A=7 , _A=True , _A=False , _A=9_9 , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A=0.1 , _A=0.1 , _A=2_0 , _A=2 , _A=1 , _A=0 , ): '''simple docstring''' UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = seq_length UpperCAmelCase = is_training UpperCAmelCase = use_labels UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = max_position_embeddings UpperCAmelCase = eos_token_id UpperCAmelCase = pad_token_id UpperCAmelCase = bos_token_id def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , 
decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) UpperCAmelCase = prepare_mbart_inputs_dict(_A , _A , _A ) return config, inputs_dict def _lowercase ( self , _A , _A ): '''simple docstring''' UpperCAmelCase = TFMBartModel(config=_A ).get_decoder() UpperCAmelCase = inputs_dict['''input_ids'''] UpperCAmelCase = input_ids[:1, :] UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :] UpperCAmelCase = inputs_dict['''head_mask'''] UpperCAmelCase = 1 # first forward pass UpperCAmelCase = model(_A , attention_mask=_A , head_mask=_A , use_cache=_A ) UpperCAmelCase , UpperCAmelCase = outputs.to_tuple() UpperCAmelCase = past_key_values[1] def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , ) -> List[str]: '''simple docstring''' if attention_mask is None: UpperCAmelCase = tf.cast(tf.math.not_equal(UpperCamelCase__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCAmelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, 
"decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class A_ (a_ , a_ , unittest.TestCase ): UpperCAmelCase__ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () UpperCAmelCase__ = (TFMBartForConditionalGeneration,) if is_tf_available() else () UpperCAmelCase__ = ( { '''conversational''': TFMBartForConditionalGeneration, '''feature-extraction''': TFMBartModel, '''summarization''': TFMBartForConditionalGeneration, '''text2text-generation''': TFMBartForConditionalGeneration, '''translation''': TFMBartForConditionalGeneration, } if is_tf_available() else {} ) UpperCAmelCase__ = True UpperCAmelCase__ = False UpperCAmelCase__ = False def _lowercase ( self , _A , _A , _A , _A , _A ): '''simple docstring''' if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = TFMBartModelTester(self ) UpperCAmelCase = ConfigTester(self , config_class=_A ) def _lowercase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_A ) @require_sentencepiece @require_tokenizers @require_tf class A_ (unittest.TestCase ): UpperCAmelCase__ = [ ''' UN Chief Says There Is No Military Solution in Syria''', ] UpperCAmelCase__ = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', ] UpperCAmelCase__ = '''facebook/mbart-large-en-ro''' @cached_property def _lowercase ( self ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def _lowercase ( self , **_A ): '''simple docstring''' UpperCAmelCase = self.translate_src_text(**_A ) self.assertListEqual(self.expected_text , _A ) def _lowercase ( self , **_A ): '''simple docstring''' UpperCAmelCase = self.tokenizer(self.src_text , **_A , return_tensors='''tf''' ) UpperCAmelCase = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) UpperCAmelCase = self.tokenizer.batch_decode(_A , skip_special_tokens=_A ) return generated_words @slow def _lowercase ( self ): '''simple docstring''' self._assert_generated_batch_equal_expected()
273
1
import argparse
import shutil
from pathlib import Path

from tqdm import tqdm

from transformers import AutoTokenizer


def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    """Greedily merge consecutive (src, tgt) example pairs into longer examples.

    Pairs are concatenated (space-separated) as long as both the packed source
    and the packed target stay within ``max_tokens`` tokens under ``tok``;
    otherwise the current packed pair is finalized and a new one is started.

    Args:
        tok: tokenizer callable; ``tok(text, return_tensors="pt").input_ids``
            must expose ``.shape[1]`` as the token count.
        src_examples: iterable of source strings.
        tgt_examples: iterable of target strings, parallel to ``src_examples``.
        max_tokens: per-side token budget for a packed example.

    Returns:
        ``(finished_src, finished_tgt)`` — two parallel lists of packed strings.
    """
    # NOTE(review): local/def names were destroyed by an automated rename in
    # the previous revision (everything bound to `UpperCAmelCase` while read
    # under the real names) — restored here so the module actually runs.
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        # Token count of the candidate string exceeds the budget?
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup: flush the last packed pair
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir, max_tokens, save_path):
    """Pack the train split of ``data_dir`` and copy val/test splits unchanged.

    Reads ``{split}.source`` / ``{split}.target`` line files, packs the train
    split with :func:`pack_examples`, and writes everything under ``save_path``.
    """
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"""packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.""")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    """CLI entry point: parse args, load the tokenizer, pack the data dir."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
273
# NOTE(review): this chunk arrived whitespace-mangled (a CTRL model-test
# module collapsed onto five physical lines); re-indented here without
# changing any code token.  Identifier obfuscation left real defects in
# place: duplicate `_A` parameters are a SyntaxError, `self.` was stripped
# from the tester's attribute assignments (`UpperCAmelCase = parent` etc.),
# and bodies read names (`model`, `result`, `config_and_inputs`, ...) that
# are never bound.  TODO: recover the original identifiers.
import gc
import unittest

from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
        CTRLForSequenceClassification,
        CTRLLMHeadModel,
        CTRLModel,
    )


class A_ :
    # Tester helper that builds tiny CTRL configs/inputs for the suite below.
    def __init__( self , _A , _A=1_4 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=True , _A=9_9 , _A=3_2 , _A=5 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=1_6 , _A=2 , _A=0.02 , _A=3 , _A=4 , _A=None , ):
        '''simple docstring'''
        UpperCAmelCase = parent
        UpperCAmelCase = batch_size
        UpperCAmelCase = seq_length
        UpperCAmelCase = is_training
        UpperCAmelCase = use_token_type_ids
        UpperCAmelCase = use_input_mask
        UpperCAmelCase = use_labels
        UpperCAmelCase = use_mc_token_ids
        UpperCAmelCase = vocab_size
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_hidden_layers
        UpperCAmelCase = num_attention_heads
        UpperCAmelCase = intermediate_size
        UpperCAmelCase = hidden_act
        UpperCAmelCase = hidden_dropout_prob
        UpperCAmelCase = attention_probs_dropout_prob
        UpperCAmelCase = max_position_embeddings
        UpperCAmelCase = type_vocab_size
        UpperCAmelCase = type_sequence_label_size
        UpperCAmelCase = initializer_range
        UpperCAmelCase = num_labels
        UpperCAmelCase = num_choices
        UpperCAmelCase = scope
        UpperCAmelCase = self.vocab_size - 1

    def _lowercase ( self ):
        '''simple docstring'''
        # Builds the full (config, inputs...) tuple for one test run.
        UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCAmelCase = None
        if self.use_input_mask:
            UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCAmelCase = None
        if self.use_token_type_ids:
            UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCAmelCase = None
        if self.use_mc_token_ids:
            UpperCAmelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
        UpperCAmelCase = None
        UpperCAmelCase = None
        UpperCAmelCase = None
        if self.use_labels:
            UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
        UpperCAmelCase = self.get_config()
        UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def _lowercase ( self ):
        '''simple docstring'''
        return CTRLConfig(
            vocab_size=self.vocab_size ,
            n_embd=self.hidden_size ,
            n_layer=self.num_hidden_layers ,
            n_head=self.num_attention_heads ,
            n_positions=self.max_position_embeddings ,
            pad_token_id=self.pad_token_id ,
        )

    def _lowercase ( self , _A , _A , _A , _A , _A , *_A ):
        '''simple docstring'''
        # Checks the base CTRLModel forward pass and past_key_values length.
        UpperCAmelCase = CTRLModel(config=_A )
        model.to(_A )
        model.eval()
        model(_A , token_type_ids=_A , head_mask=_A )
        model(_A , token_type_ids=_A )
        UpperCAmelCase = model(_A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )

    def _lowercase ( self , _A , _A , _A , _A , _A , *_A ):
        '''simple docstring'''
        # Checks the LM head: scalar loss and full-vocab logits.
        UpperCAmelCase = CTRLLMHeadModel(_A )
        model.to(_A )
        model.eval()
        UpperCAmelCase = model(_A , token_type_ids=_A , labels=_A )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _lowercase ( self ):
        '''simple docstring'''
        UpperCAmelCase = self.prepare_config_and_inputs()
        (
            ( UpperCAmelCase ) ,
            ( UpperCAmelCase ) ,
            ( UpperCAmelCase ) ,
            ( UpperCAmelCase ) ,
            ( UpperCAmelCase ) ,
            ( UpperCAmelCase ) ,
            ( UpperCAmelCase ) ,
            ( UpperCAmelCase ) ,
            ( UpperCAmelCase ) ,
        ) = config_and_inputs
        UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
        return config, inputs_dict

    def _lowercase ( self , _A , _A , _A , _A , *_A ):
        '''simple docstring'''
        UpperCAmelCase = self.num_labels
        UpperCAmelCase = CTRLForSequenceClassification(_A )
        model.to(_A )
        model.eval()
        UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        UpperCAmelCase = model(_A , token_type_ids=_A , labels=_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )


@require_torch
class A_ (a_ , a_ , a_ , unittest.TestCase ):
    UpperCAmelCase__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    UpperCAmelCase__ = (CTRLLMHeadModel,) if is_torch_available() else ()
    UpperCAmelCase__ = (
        {
            '''feature-extraction''': CTRLModel,
            '''text-classification''': CTRLForSequenceClassification,
            '''text-generation''': CTRLLMHeadModel,
            '''zero-shot''': CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    UpperCAmelCase__ = True
    UpperCAmelCase__ = False
    UpperCAmelCase__ = False

    def _lowercase ( self , _A , _A , _A , _A , _A ):
        '''simple docstring'''
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def _lowercase ( self ):
        '''simple docstring'''
        UpperCAmelCase = CTRLModelTester(self )
        UpperCAmelCase = ConfigTester(self , config_class=_A , n_embd=3_7 )

    def _lowercase ( self ):
        '''simple docstring'''
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def _lowercase ( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def _lowercase ( self ):
        '''simple docstring'''
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*_A )

    def _lowercase ( self ):
        '''simple docstring'''
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*_A )

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def _lowercase ( self ):
        '''simple docstring'''
        pass

    @slow
    def _lowercase ( self ):
        '''simple docstring'''
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase = CTRLModel.from_pretrained(_A )
            self.assertIsNotNone(_A )

    @unittest.skip('''The model doesn\'t support left padding''' )  # and it's not used enough to be worth fixing :)
    def _lowercase ( self ):
        '''simple docstring'''
        pass


@require_torch
class A_ (unittest.TestCase ):
    # Slow integration test: greedy generation from the full `ctrl` checkpoint.
    def _lowercase ( self ):
        '''simple docstring'''
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def _lowercase ( self ):
        '''simple docstring'''
        UpperCAmelCase = CTRLLMHeadModel.from_pretrained('''ctrl''' )
        model.to(_A )
        UpperCAmelCase = torch.tensor(
            [[1_1_8_5_9, 0, 1_6_1_1, 8]] , dtype=torch.long , device=_A
        )  # Legal the president is
        UpperCAmelCase = [
            1_1_8_5_9,
            0,
            1_6_1_1,
            8,
            5,
            1_5_0,
            2_6_4_4_9,
            2,
            1_9,
            3_4_8,
            4_6_9,
            3,
            2_5_9_5,
            4_8,
            2_0_7_4_0,
            2_4_6_5_3_3,
            2_4_6_5_3_3,
            1_9,
            3_0,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        UpperCAmelCase = model.generate(_A , do_sample=_A )
        self.assertListEqual(output_ids[0].tolist() , _A )
273
1
# NOTE(review): whitespace-mangled benchmark-utility test module re-indented
# without changing any code token.  Identifier obfuscation left real defects:
# locals are all bound to `UpperCAmelCase` while read under their original
# names (`results`, `benchmark`, `config`, ...), and many keyword values are
# the placeholder `_A` (e.g. `training=_A`) where booleans clearly belong.
# TODO: recover the original identifiers.
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device


if is_torch_available():
    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments


@require_torch
class A_ (unittest.TestCase ):
    def _lowercase ( self , _A ):
        '''simple docstring'''
        # Every (batch_size, sequence_length) cell of the result dict must be set.
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
                UpperCAmelCase = model_result['''result'''][batch_size][sequence_length]
                self.assertIsNotNone(_A )

    def _lowercase ( self ):
        '''simple docstring'''
        UpperCAmelCase = '''sshleifer/tiny-gpt2'''
        UpperCAmelCase = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,
            training=_A ,
            inference=_A ,
            sequence_lengths=[8] ,
            batch_sizes=[1] ,
            multi_process=_A ,
        )
        UpperCAmelCase = PyTorchBenchmark(_A )
        UpperCAmelCase = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def _lowercase ( self ):
        '''simple docstring'''
        UpperCAmelCase = '''sgugger/tiny-distilbert-classification'''
        UpperCAmelCase = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,
            training=_A ,
            inference=_A ,
            sequence_lengths=[8] ,
            batch_sizes=[1] ,
            multi_process=_A ,
            only_pretrain_model=_A ,
        )
        UpperCAmelCase = PyTorchBenchmark(_A )
        UpperCAmelCase = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def _lowercase ( self ):
        '''simple docstring'''
        UpperCAmelCase = '''sshleifer/tiny-gpt2'''
        UpperCAmelCase = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,
            training=_A ,
            inference=_A ,
            torchscript=_A ,
            sequence_lengths=[8] ,
            batch_sizes=[1] ,
            multi_process=_A ,
        )
        UpperCAmelCase = PyTorchBenchmark(_A )
        UpperCAmelCase = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
    def _lowercase ( self ):
        '''simple docstring'''
        UpperCAmelCase = '''sshleifer/tiny-gpt2'''
        UpperCAmelCase = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,
            training=_A ,
            inference=_A ,
            fpaa=_A ,
            sequence_lengths=[8] ,
            batch_sizes=[1] ,
            multi_process=_A ,
        )
        UpperCAmelCase = PyTorchBenchmark(_A )
        UpperCAmelCase = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def _lowercase ( self ):
        '''simple docstring'''
        UpperCAmelCase = '''sshleifer/tiny-gpt2'''
        UpperCAmelCase = AutoConfig.from_pretrained(_A )
        # set architectures equal to `None`
        UpperCAmelCase = None
        UpperCAmelCase = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,
            training=_A ,
            inference=_A ,
            sequence_lengths=[8] ,
            batch_sizes=[1] ,
            multi_process=_A ,
        )
        UpperCAmelCase = PyTorchBenchmark(_A , configs=[config] )
        UpperCAmelCase = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def _lowercase ( self ):
        '''simple docstring'''
        UpperCAmelCase = '''sshleifer/tiny-gpt2'''
        UpperCAmelCase = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,
            training=_A ,
            inference=_A ,
            sequence_lengths=[8] ,
            batch_sizes=[1] ,
            multi_process=_A ,
        )
        UpperCAmelCase = PyTorchBenchmark(_A )
        UpperCAmelCase = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
    def _lowercase ( self ):
        '''simple docstring'''
        UpperCAmelCase = '''sshleifer/tiny-gpt2'''
        UpperCAmelCase = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,
            training=_A ,
            inference=_A ,
            sequence_lengths=[8] ,
            batch_sizes=[1] ,
            fpaa=_A ,
            multi_process=_A ,
        )
        UpperCAmelCase = PyTorchBenchmark(_A )
        UpperCAmelCase = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def _lowercase ( self ):
        '''simple docstring'''
        UpperCAmelCase = '''sshleifer/tiny-gpt2'''
        UpperCAmelCase = AutoConfig.from_pretrained(_A )
        UpperCAmelCase = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,
            training=_A ,
            inference=_A ,
            sequence_lengths=[8] ,
            batch_sizes=[1] ,
            multi_process=_A ,
        )
        UpperCAmelCase = PyTorchBenchmark(_A , configs=[config] )
        UpperCAmelCase = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def _lowercase ( self ):
        '''simple docstring'''
        UpperCAmelCase = '''sshleifer/tinier_bart'''
        UpperCAmelCase = AutoConfig.from_pretrained(_A )
        UpperCAmelCase = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,
            training=_A ,
            inference=_A ,
            sequence_lengths=[8] ,
            batch_sizes=[1] ,
            multi_process=_A ,
        )
        UpperCAmelCase = PyTorchBenchmark(_A , configs=[config] )
        UpperCAmelCase = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def _lowercase ( self ):
        '''simple docstring'''
        UpperCAmelCase = '''sshleifer/tiny-gpt2'''
        UpperCAmelCase = AutoConfig.from_pretrained(_A )
        UpperCAmelCase = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,
            training=_A ,
            inference=_A ,
            sequence_lengths=[8] ,
            batch_sizes=[1] ,
            multi_process=_A ,
        )
        UpperCAmelCase = PyTorchBenchmark(_A , configs=[config] )
        UpperCAmelCase = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def _lowercase ( self ):
        '''simple docstring'''
        UpperCAmelCase = '''sshleifer/tinier_bart'''
        UpperCAmelCase = AutoConfig.from_pretrained(_A )
        UpperCAmelCase = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,
            training=_A ,
            inference=_A ,
            sequence_lengths=[8] ,
            batch_sizes=[1] ,
            multi_process=_A ,
        )
        UpperCAmelCase = PyTorchBenchmark(_A , configs=[config] )
        UpperCAmelCase = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def _lowercase ( self ):
        '''simple docstring'''
        # Benchmark with CSV output enabled; every CSV must exist afterwards.
        UpperCAmelCase = '''sshleifer/tiny-gpt2'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            UpperCAmelCase = PyTorchBenchmarkArguments(
                models=[MODEL_ID] ,
                training=_A ,
                inference=_A ,
                save_to_csv=_A ,
                sequence_lengths=[8] ,
                batch_sizes=[1] ,
                inference_time_csv_file=os.path.join(_A , '''inf_time.csv''' ) ,
                train_memory_csv_file=os.path.join(_A , '''train_mem.csv''' ) ,
                inference_memory_csv_file=os.path.join(_A , '''inf_mem.csv''' ) ,
                train_time_csv_file=os.path.join(_A , '''train_time.csv''' ) ,
                env_info_csv_file=os.path.join(_A , '''env.csv''' ) ,
                multi_process=_A ,
            )
            UpperCAmelCase = PyTorchBenchmark(_A )
            benchmark.run()
            self.assertTrue(Path(os.path.join(_A , '''inf_time.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(_A , '''train_time.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(_A , '''inf_mem.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(_A , '''train_mem.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(_A , '''env.csv''' ) ).exists() )

    def _lowercase ( self ):
        '''simple docstring'''
        # Line-by-line memory tracing: summaries must be populated and logged.
        UpperCAmelCase = '''sshleifer/tiny-gpt2'''

        def _check_summary_is_not_empty(_A ):
            self.assertTrue(hasattr(_A , '''sequential''' ) )
            self.assertTrue(hasattr(_A , '''cumulative''' ) )
            self.assertTrue(hasattr(_A , '''current''' ) )
            self.assertTrue(hasattr(_A , '''total''' ) )

        with tempfile.TemporaryDirectory() as tmp_dir:
            UpperCAmelCase = PyTorchBenchmarkArguments(
                models=[MODEL_ID] ,
                training=_A ,
                inference=_A ,
                sequence_lengths=[8] ,
                batch_sizes=[1] ,
                log_filename=os.path.join(_A , '''log.txt''' ) ,
                log_print=_A ,
                trace_memory_line_by_line=_A ,
                multi_process=_A ,
            )
            UpperCAmelCase = PyTorchBenchmark(_A )
            UpperCAmelCase = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            _check_summary_is_not_empty(result.train_summary )
            self.assertTrue(Path(os.path.join(_A , '''log.txt''' ) ).exists() )
273
import cva import numpy as np class A_ : def __init__( self , _A , _A ): '''simple docstring''' if k in (0.04, 0.06): UpperCAmelCase = k UpperCAmelCase = window_size else: raise ValueError('''invalid k value''' ) def __str__( self ): '''simple docstring''' return str(self.k ) def _lowercase ( self , _A ): '''simple docstring''' UpperCAmelCase = cva.imread(_A , 0 ) UpperCAmelCase , UpperCAmelCase = img.shape UpperCAmelCase = [] UpperCAmelCase = img.copy() UpperCAmelCase = cva.cvtColor(_A , cva.COLOR_GRAY2RGB ) UpperCAmelCase , UpperCAmelCase = np.gradient(_A ) UpperCAmelCase = dx**2 UpperCAmelCase = dy**2 UpperCAmelCase = dx * dy UpperCAmelCase = 0.04 UpperCAmelCase = self.window_size // 2 for y in range(_A , h - offset ): for x in range(_A , w - offset ): UpperCAmelCase = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() UpperCAmelCase = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() UpperCAmelCase = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() UpperCAmelCase = (wxx * wyy) - (wxy**2) UpperCAmelCase = wxx + wyy UpperCAmelCase = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 2_5_5 ) return color_img, corner_list if __name__ == "__main__": __A : Tuple = HarrisCorner(0.04, 3) __A , __A : List[Any] = edge_detect.detect("path_to_image") cva.imwrite("detect.png", color_img)
273
1
from __future__ import annotations from typing import Any class A_ : def __init__( self , _A ): '''simple docstring''' UpperCAmelCase = num_of_nodes UpperCAmelCase = [] UpperCAmelCase = {} def _lowercase ( self , _A , _A , _A ): '''simple docstring''' self.m_edges.append([u_node, v_node, weight] ) def _lowercase ( self , _A ): '''simple docstring''' if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def _lowercase ( self , _A ): '''simple docstring''' if self.m_component[u_node] != u_node: for k in self.m_component: UpperCAmelCase = self.find_component(_A ) def _lowercase ( self , _A , _A , _A ): '''simple docstring''' if component_size[u_node] <= component_size[v_node]: UpperCAmelCase = v_node component_size[v_node] += component_size[u_node] self.set_component(_A ) elif component_size[u_node] >= component_size[v_node]: UpperCAmelCase = self.find_component(_A ) component_size[u_node] += component_size[v_node] self.set_component(_A ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = [] UpperCAmelCase = 0 UpperCAmelCase = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) UpperCAmelCase = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = edge UpperCAmelCase = self.m_component[u] UpperCAmelCase = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): UpperCAmelCase = [u, v, w] for edge in minimum_weight_edge: if isinstance(_A , _A ): UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = edge UpperCAmelCase = self.m_component[u] UpperCAmelCase = self.m_component[v] if u_component != v_component: mst_weight += w self.union(_A , _A , _A ) print(F"""Added edge [{u} - 
{v}]\nAdded weight: {w}\n""" ) num_of_components -= 1 UpperCAmelCase = [-1] * self.m_num_of_nodes print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" ) def __SCREAMING_SNAKE_CASE ( ) -> None: '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
273
from datetime import datetime import requests def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> bytes: '''simple docstring''' UpperCAmelCase = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url=''' UpperCAmelCase = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src'''] return requests.get(UpperCamelCase__ ).content if __name__ == "__main__": __A : Union[str, Any] = input("Enter Video/IGTV url: ").strip() __A : Tuple = F'{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4' with open(file_name, "wb") as fp: fp.write(download_video(url)) print(F'Done. Video saved to disk as {file_name}.')
273
1
# NOTE(review): whitespace-mangled BlipProcessor test module re-indented
# without changing any code token.  Identifier obfuscation left real defects:
# locals are bound to `UpperCAmelCase` but read under their original names
# (`processor`, `tokenizer`, `image_inputs`, ...), and several keyword values
# are the placeholder `_A`.  TODO: recover the original identifiers.
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast


@require_vision
class A_ (unittest.TestCase ):
    def _lowercase ( self ):
        '''simple docstring'''
        # setUp: save a tiny processor (image processor + BERT tokenizer) to a tmp dir.
        UpperCAmelCase = tempfile.mkdtemp()
        UpperCAmelCase = BlipImageProcessor()
        UpperCAmelCase = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-BertModel''' )
        UpperCAmelCase = BlipProcessor(_A , _A )
        processor.save_pretrained(self.tmpdirname )

    def _lowercase ( self , **_A ):
        '''simple docstring'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **_A ).tokenizer

    def _lowercase ( self , **_A ):
        '''simple docstring'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **_A ).image_processor

    def _lowercase ( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )

    def _lowercase ( self ):
        '''simple docstring'''
        # One random channels-first uint8 image converted to a PIL image.
        UpperCAmelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
        UpperCAmelCase = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def _lowercase ( self ):
        '''simple docstring'''
        # save_pretrained / from_pretrained round-trip with overridden kwargs.
        UpperCAmelCase = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        UpperCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        UpperCAmelCase = self.get_image_processor(do_normalize=_A , padding_value=1.0 )
        UpperCAmelCase = BlipProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , _A )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , _A )

    def _lowercase ( self ):
        '''simple docstring'''
        # Processor image path must match the bare image processor output.
        UpperCAmelCase = self.get_image_processor()
        UpperCAmelCase = self.get_tokenizer()
        UpperCAmelCase = BlipProcessor(tokenizer=_A , image_processor=_A )
        UpperCAmelCase = self.prepare_image_inputs()
        UpperCAmelCase = image_processor(_A , return_tensors='''np''' )
        UpperCAmelCase = processor(images=_A , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def _lowercase ( self ):
        '''simple docstring'''
        # Processor text path must match the bare tokenizer output.
        UpperCAmelCase = self.get_image_processor()
        UpperCAmelCase = self.get_tokenizer()
        UpperCAmelCase = BlipProcessor(tokenizer=_A , image_processor=_A )
        UpperCAmelCase = '''lower newer'''
        UpperCAmelCase = processor(text=_A )
        UpperCAmelCase = tokenizer(_A , return_token_type_ids=_A )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def _lowercase ( self ):
        '''simple docstring'''
        UpperCAmelCase = self.get_image_processor()
        UpperCAmelCase = self.get_tokenizer()
        UpperCAmelCase = BlipProcessor(tokenizer=_A , image_processor=_A )
        UpperCAmelCase = '''lower newer'''
        UpperCAmelCase = self.prepare_image_inputs()
        UpperCAmelCase = processor(text=_A , images=_A )
        self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
        # test if it raises when no input is passed
        with pytest.raises(_A ):
            processor()

    def _lowercase ( self ):
        '''simple docstring'''
        # batch_decode must be forwarded straight to the tokenizer.
        UpperCAmelCase = self.get_image_processor()
        UpperCAmelCase = self.get_tokenizer()
        UpperCAmelCase = BlipProcessor(tokenizer=_A , image_processor=_A )
        UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        UpperCAmelCase = processor.batch_decode(_A )
        UpperCAmelCase = tokenizer.batch_decode(_A )
        self.assertListEqual(_A , _A )

    def _lowercase ( self ):
        '''simple docstring'''
        UpperCAmelCase = self.get_image_processor()
        UpperCAmelCase = self.get_tokenizer()
        UpperCAmelCase = BlipProcessor(tokenizer=_A , image_processor=_A )
        UpperCAmelCase = '''lower newer'''
        UpperCAmelCase = self.prepare_image_inputs()
        UpperCAmelCase = processor(text=_A , images=_A )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
273
from __future__ import annotations from collections.abc import Callable def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 100 , ) -> float: '''simple docstring''' UpperCAmelCase = x_start UpperCAmelCase = fnc(UpperCamelCase__ ) UpperCAmelCase = 0.0 for _ in range(UpperCamelCase__ ): # Approximates small segments of curve as linear and solve # for trapezoidal area UpperCAmelCase = (x_end - x_start) / steps + xa UpperCAmelCase = fnc(UpperCamelCase__ ) area += abs(fxa + fxa ) * (xa - xa) / 2 # Increment step UpperCAmelCase = xa UpperCAmelCase = fxa return area if __name__ == "__main__": def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> str: '''simple docstring''' return x**3 + x**2 print("f(x) = x^3 + x^2") print("The area between the curve, x = -5, x = 5 and the x axis is:") __A : List[Any] = 10 while i <= 100_000: print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}') i *= 10
273
1
from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class A_ : UpperCAmelCase__ = 42 UpperCAmelCase__ = None UpperCAmelCase__ = None __A : Union[str, Any] = namedtuple("CoinsDistribResult", "moves excess") def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> int: '''simple docstring''' if root is None: return 0 # Validation def count_nodes(UpperCamelCase__ ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(UpperCamelCase__ ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(UpperCamelCase__ ) != count_coins(UpperCamelCase__ ): raise ValueError('''The nodes number should be same as the number of coins''' ) # Main calculation def get_distrib(UpperCamelCase__ ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) UpperCAmelCase , UpperCAmelCase = get_distrib(node.left ) UpperCAmelCase , UpperCAmelCase = get_distrib(node.right ) UpperCAmelCase = 1 - left_distrib_excess UpperCAmelCase = 1 - right_distrib_excess UpperCAmelCase = ( left_distrib_moves + right_distrib_moves + abs(UpperCamelCase__ ) + abs(UpperCamelCase__ ) ) UpperCAmelCase = node.data - coins_to_left - coins_to_right return CoinsDistribResult(UpperCamelCase__ , UpperCamelCase__ ) return get_distrib(UpperCamelCase__ )[0] if __name__ == "__main__": import doctest doctest.testmod()
273
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer


logger = logging.get_logger(__name__)

# File names the tokenizer expects inside a checkpoint directory.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Hub download locations per published checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}


class A_(PreTrainedTokenizerFast):
    """Fast (Rust-backed) WordPiece tokenizer for SqueezeBERT.

    NOTE(review): the original file had duplicated `_A` parameter names (a
    SyntaxError), three methods all called `_lowercase`, an undefined base
    class `a_`, and unbound module constants; this restores the standard
    `PreTrainedTokenizerFast` subclass shape (class name kept as in the file).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the serialized normalizer disagrees with the requested options,
        # rebuild it with the options passed to this constructor.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]` from id lists."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return segment ids: 0 for `[CLS] A [SEP]`, 1 for `B [SEP]` if present."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Serialize the backend model's vocabulary files into `save_directory`."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
273
1
# Conversion script: port an original SwiftFormer checkpoint into the Hugging
# Face `SwiftFormerForImageClassification` format, then sanity-check the first
# five logits of the converted model against hard-coded expected values for a
# COCO test image.
# NOTE(review): identifiers below (`UpperCAmelCase`, `UpperCamelCase__`,
# duplicated parameter names) look machine-mangled and several references
# (`im`, `swiftformer_name`, `dct`, `k_new`) are unbound as written; the code
# is kept byte-identical here and only comments were added.
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __A : Tuple = logging.get_logger(__name__) __A : str = torch.device("cpu") def __SCREAMING_SNAKE_CASE ( ) -> str: '''simple docstring''' UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ) return im def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> int: '''simple docstring''' if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1703E00, 2.1107E00, -2.0811E00, 8.8685E-01, 2.4360E-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9636E-01, 2.3478E-01, -1.6963E00, -1.7381E00, -8.6337E-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2768E-01, -4.7429E-01, -1.0897E00, -1.0248E00, 3.5523E-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5330E-01, 2.4211E-01, -6.0185E-01, -8.2789E-01, -6.0446E-02] ) def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase = dct.pop(UpperCamelCase__ ) UpperCAmelCase = val def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> int: '''simple docstring''' UpperCAmelCase = [] for k in state_dict.keys(): UpperCAmelCase = k if ".pwconv" in k: UpperCAmelCase = k_new.replace('''.pwconv''' , '''.point_wise_conv''' ) if ".dwconv" in k: UpperCAmelCase = k_new.replace('''.dwconv''' , '''.depth_wise_conv''' ) if ".Proj."
in k: UpperCAmelCase = k_new.replace('''.Proj.''' , '''.proj.''' ) if "patch_embed" in k_new: UpperCAmelCase = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''' ) if "network" in k_new: UpperCAmelCase = k_new.split('''.''' ) if ls[2].isdigit(): UpperCAmelCase = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:] ) else: UpperCAmelCase = k_new.replace('''network''' , '''swiftformer.encoder.network''' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple: '''simple docstring''' UpperCAmelCase = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size UpperCAmelCase = 1000 UpperCAmelCase = '''huggingface/label-files''' UpperCAmelCase = '''imagenet-1k-id2label.json''' UpperCAmelCase = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} UpperCAmelCase = idalabel UpperCAmelCase = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": UpperCAmelCase = [3, 3, 6, 4] UpperCAmelCase = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": UpperCAmelCase = [3, 3, 9, 6] UpperCAmelCase = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": UpperCAmelCase = [4, 3, 10, 5] UpperCAmelCase = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": UpperCAmelCase = [4, 4, 12, 6] UpperCAmelCase = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('''https''' ): UpperCAmelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='''cpu''' , check_hash=UpperCamelCase__ ) else: UpperCAmelCase = torch.load(UpperCamelCase__ , map_location='''cpu''' )
UpperCAmelCase = checkpoint UpperCAmelCase = create_rename_keys(UpperCamelCase__ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # load HuggingFace model UpperCAmelCase = SwiftFormerForImageClassification(UpperCamelCase__ ).eval() hf_model.load_state_dict(UpperCamelCase__ ) # prepare test inputs UpperCAmelCase = prepare_img() UpperCAmelCase = ViTImageProcessor.from_pretrained('''preprocessor_config''' ) UpperCAmelCase = processor(images=UpperCamelCase__ , return_tensors='''pt''' ) # compare outputs from both models UpperCAmelCase = get_expected_output(UpperCamelCase__ ) UpperCAmelCase = hf_model(inputs['''pixel_values'''] ).logits assert hf_logits.shape == torch.Size([1, 1000] ) assert torch.allclose(hf_logits[0, 0:5] , UpperCamelCase__ , atol=1E-3 ) Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": __A : int = argparse.ArgumentParser() # Required parameters parser.add_argument( "--swiftformer_name", default="swiftformer_xs", choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"], type=str, help="Name of the SwiftFormer model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default="./converted_outputs/", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.") __A : Dict = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
273
# Conversion script: T5X/Flax SwitchTransformers checkpoint -> PyTorch
# `SwitchTransformersForConditionalGeneration`.  It renames flax parameter
# paths (attention/router/expert layers), optionally parses a gin config file
# into a `SwitchTransformersConfig`, then loads the weights with
# `load_flax_weights_in_pytorch_model` and saves the PyTorch model.
# NOTE(review): identifiers (`UpperCAmelCase`, duplicated `UpperCamelCase__`
# parameters) look machine-mangled and several names (`s_dict`, `new_key`,
# `expert_weihts`, `rename_keys`) are unbound as written; the code is kept
# byte-identical and only comments were added.
import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument __A : int = { "/attention/": "/0/SelfAttention/", "/self_attention/": "/0/SelfAttention/", "/encoder_decoder_attention/": "/1/EncDecAttention/", "value": "v", "query": "q", "key": "k", "out": "o", "pre_self_attention_layer_norm": "0/layer_norm", "pre_cross_attention_layer_norm": "1/layer_norm", "pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong "token_embedder": "shared", "encoder_norm": "final_layer_norm", "decoder_norm": "final_layer_norm", "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight", "router/router_weights/w/": "router/classifier/", "roer/roer_weights/w/": "router/classifier/", "logits_dense": "lm_head", } def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> List[str]: '''simple docstring''' UpperCAmelCase = list(s_dict.keys() ) for key in keys: UpperCAmelCase = R'''.*/layers_(\d+)''' UpperCAmelCase = key if re.match(UpperCamelCase__ , UpperCamelCase__ ): UpperCAmelCase = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , UpperCamelCase__ ) UpperCAmelCase = R'''(encoder|decoder)\/''' if re.match(UpperCamelCase__ , UpperCamelCase__ ): UpperCAmelCase = re.match(UpperCamelCase__ , UpperCamelCase__ ).groups() if groups[0] == "encoder": UpperCAmelCase = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , UpperCamelCase__ ) UpperCAmelCase = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , UpperCamelCase__ ) elif groups[0] == "decoder": UpperCAmelCase = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , UpperCamelCase__ ) UpperCAmelCase = re.sub(R'''/pre_mlp_layer_norm/''' ,
R'''/2/layer_norm/''' , UpperCamelCase__ ) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: UpperCAmelCase = new_key.replace(UpperCamelCase__ , UpperCamelCase__ ) print(F"""{key} -> {new_key}""" ) UpperCAmelCase = s_dict.pop(UpperCamelCase__ ) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: UpperCAmelCase = s_dict[ '''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight''' ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: UpperCAmelCase = s_dict[ '''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight''' ].T # 3. Take extra care of the EXPERTS layer for key in list(s_dict.keys() ): if "expert" in key: UpperCAmelCase = s_dict[key].shape[0] UpperCAmelCase = s_dict[key] for idx in range(UpperCamelCase__ ): UpperCAmelCase = expert_weihts[idx] print(F"""{key} -> {key.replace("expert/" , "nested fstring" )}""" ) s_dict.pop(UpperCamelCase__ ) return s_dict __A : Optional[int] = { "NUM_ENCODER_LAYERS": "num_layers", "NUM_DECODER_LAYERS": "num_decoder_layers", "NUM_HEADS": "num_heads", "HEAD_DIM": "d_kv", "EMBED_DIM": "d_model", "MLP_DIM": "d_ff", "NUM_SELECTED_EXPERTS": "num_selected_experts", "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers", "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers", "dense.MlpBlock.activations": "feed_forward_proj", } def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> List[str]: '''simple docstring''' import regex as re with open(UpperCamelCase__ , '''r''' ) as f: UpperCAmelCase = f.read() UpperCAmelCase = re.findall(R'''(.*) = ([0-9.]*)''' , UpperCamelCase__ ) UpperCAmelCase = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": UpperCAmelCase = float(UpperCamelCase__ ) if '''.''' in value else int(UpperCamelCase__ ) UpperCAmelCase = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , UpperCamelCase__ )[0]
UpperCAmelCase = str(activation[1] ) UpperCAmelCase = num_experts UpperCAmelCase = SwitchTransformersConfig(**UpperCamelCase__ ) return config def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__="./" , UpperCamelCase__=8 ) -> List[Any]: '''simple docstring''' print(F"""Loading flax weights from : {flax_checkpoint_path}""" ) UpperCAmelCase = checkpoints.load_tax_checkpoint(UpperCamelCase__ ) if gin_file is not None: UpperCAmelCase = convert_gin_to_config(UpperCamelCase__ , UpperCamelCase__ ) else: UpperCAmelCase = SwitchTransformersConfig.from_pretrained(UpperCamelCase__ ) UpperCAmelCase = SwitchTransformersForConditionalGeneration(UpperCamelCase__ ) UpperCAmelCase = flax_params['''target'''] UpperCAmelCase = flatten_dict(UpperCamelCase__ , sep='''/''' ) UpperCAmelCase = rename_keys(UpperCamelCase__ ) UpperCAmelCase = unflatten_dict(UpperCamelCase__ , sep='''/''' ) # Load the flax params in the PT model load_flax_weights_in_pytorch_model(UpperCamelCase__ , UpperCamelCase__ ) print(F"""Save PyTorch model to {pytorch_dump_path}""" ) pt_model.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": __A : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--switch_t5x_checkpoint_path", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the" " model architecture. If not provided, a `gin_file` has to be provided." ), ) parser.add_argument( "--gin_file", default=None, type=str, required=False, help="Path to the gin config file. If not provided, a `config_file` has to be passed ", ) parser.add_argument( "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
) parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts") __A : Tuple = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_tax_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
273
1
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer


logger = logging.get_logger(__name__)

# File names the tokenizer expects inside a checkpoint directory.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Hub download locations per published checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}


class A_(PreTrainedTokenizerFast):
    """Fast (Rust-backed) WordPiece tokenizer for SqueezeBERT.

    NOTE(review): the original file had duplicated `_A` parameter names (a
    SyntaxError), three methods all called `_lowercase`, an undefined base
    class `a_`, and unbound module constants; this restores the standard
    `PreTrainedTokenizerFast` subclass shape (class name kept as in the file).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the serialized normalizer disagrees with the requested options,
        # rebuild it with the options passed to this constructor.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]` from id lists."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return segment ids: 0 for `[CLS] A [SEP]`, 1 for `B [SEP]` if present."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Serialize the backend model's vocabulary files into `save_directory`."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
273
# Test mixin for DeepFloyd-IF pipelines (diffusers): builds tiny dummy
# components (T5 text encoder + tokenizer, UNet2DConditionModel, DDPM
# schedulers, watermarker) and checks that save_pretrained/from_pretrained
# round-trips keep pipeline outputs within 1e-4, both with raw prompts and
# with precomputed prompt embeddings (optional components set to None).
# NOTE(review): identifiers (`UpperCAmelCase`, `_A`) look machine-mangled —
# several locals are assigned to the same name and some method parameters are
# duplicated; the code is kept byte-identical and only comments were added.
import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class A_ : def _lowercase ( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) UpperCAmelCase = UNetaDConditionModel( sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) UpperCAmelCase = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , ) torch.manual_seed(0 ) UpperCAmelCase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def _lowercase ( self ): '''simple docstring''' torch.manual_seed(0 )
UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) UpperCAmelCase = UNetaDConditionModel( sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='''gelu''' , time_embedding_dim=3_2 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) UpperCAmelCase = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , ) torch.manual_seed(0 ) UpperCAmelCase = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , ) torch.manual_seed(0 ) UpperCAmelCase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.get_dummy_components() UpperCAmelCase = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = self.get_dummy_inputs(_A ) UpperCAmelCase = inputs['''prompt'''] UpperCAmelCase = inputs['''generator'''] UpperCAmelCase = inputs['''num_inference_steps'''] UpperCAmelCase = inputs['''output_type'''] if "image" in inputs: UpperCAmelCase = inputs['''image'''] else: UpperCAmelCase = None if "mask_image" in inputs: UpperCAmelCase = inputs['''mask_image'''] else: UpperCAmelCase = None if "original_image" in inputs: UpperCAmelCase = inputs['''original_image'''] else: UpperCAmelCase = None UpperCAmelCase , UpperCAmelCase = pipe.encode_prompt(_A ) # inputs with prompt converted to embeddings UpperCAmelCase = { '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''': num_inference_steps, '''output_type''': output_type, } if image is not None: UpperCAmelCase = image if mask_image is not None: UpperCAmelCase = mask_image if original_image is not None: UpperCAmelCase = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(_A , _A , _A ) UpperCAmelCase = pipe(**_A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_A ) UpperCAmelCase = self.pipeline_class.from_pretrained(_A ) pipe_loaded.to(_A ) pipe_loaded.set_progress_bar_config(disable=_A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(_A , _A ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , ) UpperCAmelCase = self.get_dummy_inputs(_A ) UpperCAmelCase = inputs['''generator'''] UpperCAmelCase = inputs['''num_inference_steps'''] UpperCAmelCase = inputs['''output_type'''] # inputs with prompt converted to embeddings UpperCAmelCase = { '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''':
num_inference_steps, '''output_type''': output_type, } if image is not None: UpperCAmelCase = image if mask_image is not None: UpperCAmelCase = mask_image if original_image is not None: UpperCAmelCase = original_image UpperCAmelCase = pipe_loaded(**_A )[0] UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max() self.assertLess(_A , 1E-4 ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.get_dummy_components() UpperCAmelCase = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase = self.get_dummy_inputs(_A ) UpperCAmelCase = pipe(**_A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_A ) UpperCAmelCase = self.pipeline_class.from_pretrained(_A ) pipe_loaded.to(_A ) pipe_loaded.set_progress_bar_config(disable=_A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests UpperCAmelCase = self.get_dummy_inputs(_A ) UpperCAmelCase = pipe_loaded(**_A )[0] UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max() self.assertLess(_A , 1E-4 )
273
1
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class A_(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """T5-style encoder stack over note tokens: learned token embeddings plus a
    frozen positional embedding table, `num_layers` T5 encoder blocks, a final
    layer norm, and dropout before/after the stack.

    NOTE(review): the original had duplicated `_A` parameter names (a
    SyntaxError), undefined bases `a_`, and the forward pass named
    `_lowercase` (which breaks the `nn.Module` call protocol); this restores
    the standard shape with the mixins imported at the top of the file.
    Parameter names follow the T5 configuration vocabulary — confirm against
    callers that construct this model from a config.
    """

    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        # Positions are a fixed lookup table, not trained.
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = TaConfig(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = TaBlock(t5config)
            self.encoders.append(lyr)

        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        """Encode a batch of token ids.

        :param encoder_input_tokens: integer tensor of token ids; position 1
            of its shape is treated as the sequence dimension.
        :param encoder_inputs_mask: padding mask passed through to
            `get_extended_attention_mask` and returned unchanged.
        :return: (encoded hidden states after dropout, the input mask)
        """
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # Invert the padding mask into the additive attention mask the T5
        # blocks expect.
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
273
from __future__ import annotations

from collections import namedtuple

# Explicit export: leading-underscore names are otherwise skipped by
# `from module import *`.
__all__ = ["__SCREAMING_SNAKE_CASE"]


def __SCREAMING_SNAKE_CASE(voltage: float, current: float, power: float) -> tuple:
    """Solve P = V * I for whichever of the three quantities is given as 0.

    Exactly one argument must be 0 (the unknown); the other two are used to
    compute it.  Returns a namedtuple ``(name, value)`` where ``name`` is the
    quantity that was solved for.

    :raises ValueError: if zero or more than one argument is 0, or if the
        supplied power is negative.

    NOTE(review): the original bound the namedtuple factory to a mangled name
    but called it as ``result`` — a NameError on every path; only that binding
    is fixed here.
    """
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        # Power is rounded to 2 decimals; abs() keeps it non-negative.
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
273
1
# Keyboard-input helpers: two decorator factories that tag a function with a
# `handle_key` attribute (single key / multiple keys), a metaclass that
# collects those tagged methods into a per-class `key_handler` dispatch table
# and injects a `handle_input` reader, and a `register` helper that rebuilds a
# class through the metaclass.  Depends on the sibling `.keymap` module for
# `KEYMAP` and `get_character`.
# NOTE(review): both decorator factories are named `__SCREAMING_SNAKE_CASE`,
# so the second definition shadows the first, and the base `a_` of class `A_`
# is unbound — identifiers look machine-mangled; code kept byte-identical,
# only comments added.
from typing import List from .keymap import KEYMAP, get_character def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> str: '''simple docstring''' def decorator(UpperCamelCase__ ): UpperCAmelCase = getattr(UpperCamelCase__ , '''handle_key''' , [] ) handle += [key] setattr(UpperCamelCase__ , '''handle_key''' , UpperCamelCase__ ) return func return decorator def __SCREAMING_SNAKE_CASE ( *UpperCamelCase__ ) -> Tuple: '''simple docstring''' def decorator(UpperCamelCase__ ): UpperCAmelCase = getattr(UpperCamelCase__ , '''handle_key''' , [] ) handle += keys setattr(UpperCamelCase__ , '''handle_key''' , UpperCamelCase__ ) return func return decorator class A_ (a_ ): def __new__( cls , _A , _A , _A ): '''simple docstring''' UpperCAmelCase = super().__new__(cls , _A , _A , _A ) if not hasattr(_A , '''key_handler''' ): setattr(_A , '''key_handler''' , {} ) setattr(_A , '''handle_input''' , KeyHandler.handle_input ) for value in attrs.values(): UpperCAmelCase = getattr(_A , '''handle_key''' , [] ) for key in handled_keys: UpperCAmelCase = value return new_cls @staticmethod def _lowercase ( cls ): '''simple docstring''' UpperCAmelCase = get_character() if char != KEYMAP["undefined"]: UpperCAmelCase = ord(_A ) UpperCAmelCase = cls.key_handler.get(_A ) if handler: UpperCAmelCase = char return handler(cls ) else: return None def __SCREAMING_SNAKE_CASE ( cls ) -> int: '''simple docstring''' return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
273
# Lazy-import table for the RoFormer model family.  `_import_structure` maps
# submodule names to their public symbols, with optional groups added only
# when the corresponding backend (tokenizers / torch / tf / flax) is
# importable; at the bottom the module is replaced by a `_LazyModule` that
# defers the real imports until attribute access.  Under TYPE_CHECKING the
# same names are imported eagerly so static analysis sees them.
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A : Dict = { "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"], "tokenization_roformer": ["RoFormerTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = ["RoFormerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : str = [ "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "RoFormerForCausalLM", "RoFormerForMaskedLM", "RoFormerForMultipleChoice", "RoFormerForQuestionAnswering", "RoFormerForSequenceClassification", "RoFormerForTokenClassification", "RoFormerLayer", "RoFormerModel", "RoFormerPreTrainedModel", "load_tf_weights_in_roformer", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : int = [ "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRoFormerForCausalLM", "TFRoFormerForMaskedLM", "TFRoFormerForMultipleChoice", "TFRoFormerForQuestionAnswering", "TFRoFormerForSequenceClassification", "TFRoFormerForTokenClassification", "TFRoFormerLayer", "TFRoFormerModel", "TFRoFormerPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = [ "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaxRoFormerForMaskedLM", "FlaxRoFormerForMultipleChoice", "FlaxRoFormerForQuestionAnswering", "FlaxRoFormerForSequenceClassification", "FlaxRoFormerForTokenClassification", "FlaxRoFormerModel", "FlaxRoFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer
import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys __A : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
273
1
import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput __A : str = "scheduler_config.json" class A_ (a_ ): UpperCAmelCase__ = 1 UpperCAmelCase__ = 2 UpperCAmelCase__ = 3 UpperCAmelCase__ = 4 UpperCAmelCase__ = 5 UpperCAmelCase__ = 6 UpperCAmelCase__ = 7 UpperCAmelCase__ = 8 UpperCAmelCase__ = 9 UpperCAmelCase__ = 1_0 UpperCAmelCase__ = 1_1 UpperCAmelCase__ = 1_2 UpperCAmelCase__ = 1_3 UpperCAmelCase__ = 1_4 @dataclass class A_ (a_ ): UpperCAmelCase__ = 42 class A_ : UpperCAmelCase__ = SCHEDULER_CONFIG_NAME UpperCAmelCase__ = [] UpperCAmelCase__ = True @classmethod def _lowercase ( cls , _A = None , _A = None , _A=False , **_A , ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = cls.load_config( pretrained_model_name_or_path=_A , subfolder=_A , return_unused_kwargs=_A , return_commit_hash=_A , **_A , ) return cls.from_config(_A , return_unused_kwargs=_A , **_A ) def _lowercase ( self , _A , _A = False , **_A ): '''simple docstring''' self.save_config(save_directory=_A , push_to_hub=_A , **_A ) @property def _lowercase ( self ): '''simple docstring''' return self._get_compatibles() @classmethod def _lowercase ( cls ): '''simple docstring''' UpperCAmelCase = list(set([cls.__name__] + cls._compatibles ) ) UpperCAmelCase = importlib.import_module(__name__.split('''.''' )[0] ) UpperCAmelCase = [ getattr(_A , _A ) for c in compatible_classes_str if hasattr(_A , _A ) ] return compatible_classes
273
"""Convert an original YOSO checkpoint to the Hugging Face Transformers format.

NOTE(review): the original chunk defined all three functions under the same
mangled name `__SCREAMING_SNAKE_CASE` (each definition shadowing the previous)
while invoking them as `rename_key` / `convert_checkpoint_helper` /
`convert_yoso_checkpoint`, and discarded assignment targets into a shared
local — a guaranteed NameError at runtime. Names restored from their uses.
"""
import argparse

import torch

from transformers import YosoConfig, YosoForMaskedLM


def rename_key(orig_key):
    """Map a key from the original YOSO checkpoint to its Transformers name.

    Replacement order matters: e.g. `norm1`/`norm2` must be handled before the
    generic `norm`, and `ff1`/`ff2` before the generic `ff`.
    """
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        # `transformer_<n>.` becomes `encoder.layer.<n>.`
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        # Everything outside the MLM head lives under the `yoso.` backbone prefix.
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """Rename all state-dict keys in place, dropping pooler/sentence-class heads.

    Also ties the prediction bias and rebuilds position ids (offset by 2,
    matching the model's padding-aware position embeddings).
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    """Load the original checkpoint, convert its keys, and save an HF model."""
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    # Message kept verbatim (including the original's "successfuly" spelling).
    print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
273
1
"""Configuration class for TAPAS models.

NOTE(review): the original chunk declared every ``__init__`` parameter as `_A`
(duplicate argument names — a SyntaxError), inherited from an undefined `a_`,
and discarded all `self.<attr> = <attr>` assignments into a local. Restored
from the defaults visible in the mangled signature and the assignment order.
"""
from ...configuration_utils import PretrainedConfig


TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}


class TapasConfig(PretrainedConfig):
    """Stores BERT-style encoder hyperparameters plus the TAPAS fine-tuning
    (cell-selection / aggregation) hyperparameters."""

    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        # JSON round-trips turn int keys into strings; normalize them back.
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
273
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int: '''simple docstring''' if exponent == 1: return base if exponent % 2 == 0: UpperCAmelCase = _modexpt(UpperCamelCase__ , exponent // 2 , UpperCamelCase__ ) % modulo_value return (x * x) % modulo_value else: return (base * _modexpt(UpperCamelCase__ , exponent - 1 , UpperCamelCase__ )) % modulo_value def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ = 1777 , UpperCamelCase__ = 1855 , UpperCamelCase__ = 8 ) -> int: '''simple docstring''' UpperCAmelCase = base for _ in range(1 , UpperCamelCase__ ): UpperCAmelCase = _modexpt(UpperCamelCase__ , UpperCamelCase__ , 10**digits ) return result if __name__ == "__main__": print(F'{solution() = }')
273
1
"""Tests for MgpstrProcessor (character tokenizer + ViT image processor).

NOTE(review): in the original chunk every test method was named `_lowercase`
(each shadowing the previous, so unittest would run none of them) and locals
were assigned to `UpperCAmelCase` while being read back under their real names
(`processor`, `tokenizer`, ...) — a NameError. Names restored from their uses;
`np.uinta` corrected to `np.uint8`.
"""
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MgpstrProcessor, ViTImageProcessor


@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        # NOTE(review): `image_processor_tester` is not set anywhere in this
        # file — presumably provided by a mixin in the original; verify.
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a random PIL image (channels moved last for fromarray)."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decode_strs = processor.char_decode(predictions)
        decoded_tok = tokenizer.batch_decode(predictions)
        # char_decode strips the spaces the tokenizer inserts between characters.
        decode_strs_expected = [seq.replace(" ", "") for seq in decoded_tok]
        self.assertListEqual(decode_strs, decode_strs_expected)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        # Logits for the char (38), bpe (50257) and wordpiece (30522) heads.
        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)
        results = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(
            list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]
        )
273
"""Longformer configuration and its ONNX export configuration.

NOTE(review): the original chunk declared every parameter as `_A` (duplicate
argument names — a SyntaxError), inherited from undefined `a_`, and discarded
assignment targets. Restored from the defaults visible in the mangled
signatures and the assignment order.
"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging


if TYPE_CHECKING:
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase


logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}


class LongformerConfig(PretrainedConfig):
    """RoBERTa-style encoder config plus the sliding `attention_window` size
    (a single int shared by all layers, or one int per layer)."""

    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        # Signal the model to use the export-friendly attention path.
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # Longformer's tril op needs at least opset 14.
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
273
1
"""Lazy-import init for the Perceiver model family.

NOTE(review): as with the other mangled inits in this chunk, the export lists
were assigned to a throwaway ``__A`` while ``_LazyModule`` received an
undefined ``_import_structure`` (NameError at import time). Restored the
conventional lazy-module layout.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

# Maps sub-module name -> public names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

# Image processing requires the optional vision backend (PIL).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports so type checkers see the real symbols.
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
273
"""Unconditional image generation with Karras et al. (2022) stochastic sampling.

NOTE(review): the original chunk declared every ``__call__`` parameter as `_A`
(duplicate argument names — a SyntaxError), discarded locals into
`UpperCAmelCase` while reading them back as `model_output`/`step_output`, and
imported the mangled name `UNetaDModel`. Restored from the uses.
"""
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class KarrasVePipeline(DiffusionPipeline):
    # Components registered in __init__.
    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """Generate `batch_size` images; returns an ImagePipelineOutput (or a
        tuple when `return_dict=False`)."""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
273
1
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> list: '''simple docstring''' UpperCAmelCase = False while is_sorted is False: # Until all the indices are traversed keep looping UpperCAmelCase = True for i in range(0 , len(UpperCamelCase__ ) - 1 , 2 ): # iterating over all even indices if input_list[i] > input_list[i + 1]: UpperCAmelCase , UpperCAmelCase = input_list[i + 1], input_list[i] # swapping if elements not in order UpperCAmelCase = False for i in range(1 , len(UpperCamelCase__ ) - 1 , 2 ): # iterating over all odd indices if input_list[i] > input_list[i + 1]: UpperCAmelCase , UpperCAmelCase = input_list[i + 1], input_list[i] # swapping if elements not in order UpperCAmelCase = False return input_list if __name__ == "__main__": print("Enter list to be sorted") __A : List[str] = [int(x) for x in input().split()] # inputing elements of the list in one line __A : Union[str, Any] = odd_even_sort(input_list) print("The sorted list is") print(sorted_list)
273
import itertools
import random
import unittest

import numpy as np

from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch

# NOTE(review): machine-obfuscated SpeechT5 feature-extractor test module.
# `__A` was originally `global_rng` (the function below reads that name, so as
# written it raises NameError); `__SCREAMING_SNAKE_CASE` was `floats_list`
# (call sites below still use that name); every method of both classes was
# renamed to `_lowercase`, so within each class later definitions shadow
# earlier ones. Original identifiers must be restored before this test file
# can run.
__A : str = random.Random()


def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__=1.0 , UpperCamelCase__=None , UpperCamelCase__=None ) -> Tuple:
    """Build a `shape[0] x shape[1]` nested list of random floats scaled by
    `scale` (originally `floats_list(shape, scale=1.0, rng=None, name=None)`).

    NOTE(review): all four parameters share the placeholder name
    `UpperCamelCase__` — duplicate parameter names are a SyntaxError in
    Python, and the body references the original names `rng`/`shape`/`scale`.
    """
    if rng is None:
        UpperCAmelCase = global_rng
    UpperCAmelCase = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values


@require_torch
class A_ (unittest.TestCase ):
    """Config holder for the feature-extractor tests (originally
    `SpeechT5FeatureExtractionTester`); the main test class below instantiates
    it under that name."""

    def __init__( self , _A , _A=7 , _A=4_0_0 , _A=2_0_0_0 , _A=1 , _A=0.0 , _A=1_6_0_0_0 , _A=True , _A=8_0 , _A=1_6 , _A=6_4 , _A="hann_window" , _A=8_0 , _A=7_6_0_0 , _A=1E-10 , _A=True , ):
        """Store test hyper-parameters (batch size, sequence-length bounds,
        mel/STFT settings, etc.) on the instance."""
        UpperCAmelCase = parent
        UpperCAmelCase = batch_size
        UpperCAmelCase = min_seq_length
        UpperCAmelCase = max_seq_length
        # Step between successive input lengths so that inputs strictly grow.
        UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        UpperCAmelCase = feature_size
        UpperCAmelCase = padding_value
        UpperCAmelCase = sampling_rate
        UpperCAmelCase = do_normalize
        UpperCAmelCase = num_mel_bins
        UpperCAmelCase = hop_length
        UpperCAmelCase = win_length
        UpperCAmelCase = win_function
        UpperCAmelCase = fmin
        UpperCAmelCase = fmax
        UpperCAmelCase = mel_floor
        UpperCAmelCase = return_attention_mask

    def _lowercase ( self ):
        """Return the kwargs dict used to construct a SpeechTaFeatureExtractor
        (originally `prepare_feat_extract_dict`)."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def _lowercase ( self , _A=False , _A=False ):
        """Build raw-waveform test inputs (originally `prepare_inputs_for_common`);
        args are (equal_length=False, numpify=False)."""
        def _flatten(_A ):
            return list(itertools.chain(*_A ) )

        if equal_length:
            UpperCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            UpperCAmelCase = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            UpperCAmelCase = [np.asarray(_A ) for x in speech_inputs]
        return speech_inputs

    def _lowercase ( self , _A=False , _A=False ):
        """Build mel-spectrogram target inputs (originally
        `prepare_inputs_for_target`). Shadows the previous `_lowercase` here."""
        if equal_length:
            UpperCAmelCase = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            UpperCAmelCase = [
                floats_list((x, self.num_mel_bins) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            UpperCAmelCase = [np.asarray(_A ) for x in speech_inputs]
        return speech_inputs


@require_torch
class A_ (a_ , unittest.TestCase ):
    """Test suite for SpeechTaFeatureExtractor (originally
    `SpeechT5FeatureExtractionTest`, mixing in
    SequenceFeatureExtractionTestMixin — presumably the obfuscated `a_`).

    NOTE(review): every test method was renamed `_lowercase`; only the final
    definition survives class creation."""

    UpperCAmelCase__ = SpeechTaFeatureExtractor

    def _lowercase ( self ):
        """setUp: create the shared tester/config object."""
        UpperCAmelCase = SpeechTaFeatureExtractionTester(self )

    def _lowercase ( self , _A ):
        """Assert the given array is ~zero-mean / unit-variance per column
        (originally `_check_zero_mean_unit_variance`)."""
        self.assertTrue(np.all(np.mean(_A , axis=0 ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(_A , axis=0 ) - 1 ) < 1E-3 ) )

    def _lowercase ( self ):
        """Waveform __call__: list input and numpy input must encode identically,
        batched and unbatched."""
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        UpperCAmelCase = [np.asarray(_A ) for speech_input in speech_inputs]

        # Test not batched input
        UpperCAmelCase = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
        UpperCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )

        # Test batched
        UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values
        UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(_A , _A ):
            self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )

    def _lowercase ( self ):
        """Zero-mean/unit-variance normalization must hold under each padding
        strategy; padded tails must stay ~0."""
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
        UpperCAmelCase = [None, 1_6_0_0, None]

        for max_length, padding in zip(_A , _A ):
            UpperCAmelCase = feat_extract(_A , padding=_A , max_length=_A , return_tensors='''np''' )
            UpperCAmelCase = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
            self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
            self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )

    def _lowercase ( self ):
        """Same normalization check without return_tensors (plain lists)."""
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase = range(8_0_0 , 1_4_0_0 , 2_0_0 )
        UpperCAmelCase = [floats_list((1, x) )[0] for x in lengths]
        UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
        UpperCAmelCase = [None, 1_6_0_0, None]

        for max_length, padding in zip(_A , _A ):
            UpperCAmelCase = feat_extract(_A , max_length=_A , padding=_A )
            UpperCAmelCase = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
            self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
            self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )

    def _lowercase ( self ):
        """Truncation to max_length with padding='max_length'."""
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        UpperCAmelCase = feat_extract(
            _A , truncation=_A , max_length=1_0_0_0 , padding='''max_length''' , return_tensors='''np''' )
        UpperCAmelCase = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )

    def _lowercase ( self ):
        """padding='longest' must pad to min(max_length, longest input)."""
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        UpperCAmelCase = feat_extract(
            _A , truncation=_A , max_length=1_0_0_0 , padding='''longest''' , return_tensors='''np''' )
        UpperCAmelCase = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1_0_0_0) )

        UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        UpperCAmelCase = feat_extract(
            _A , truncation=_A , max_length=2_0_0_0 , padding='''longest''' , return_tensors='''np''' )
        UpperCAmelCase = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1_2_0_0) )

    def _lowercase ( self ):
        """Double-precision input must be down-cast to float32 by pad()."""
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase = np.random.rand(1_0_0 ).astype(np.floataa )
        UpperCAmelCase = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            UpperCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            UpperCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )

    def _lowercase ( self ):
        """Target (mel-spectrogram) __call__: feature size, list-vs-numpy parity,
        and 2-D array batching."""
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        UpperCAmelCase = [np.asarray(_A ) for speech_input in speech_inputs]

        # Test feature size
        UpperCAmelCase = feature_extractor(audio_target=_A , padding=_A , return_tensors='''np''' ).input_values
        self.assertTrue(input_values.ndim == 3 )
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )

        # Test not batched input
        UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values
        UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )

        # Test batched
        UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
        UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(_A , _A ):
            self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )

        # Test 2-D numpy arrays are batched.
        UpperCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
        UpperCAmelCase = np.asarray(_A )
        UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
        UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(_A , _A ):
            self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )

    def _lowercase ( self ):
        """BatchFeature over targets: list round-trip and numpy tensor shape."""
        UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
        UpperCAmelCase = feat_extract.model_input_names[0]

        UpperCAmelCase = BatchFeature({input_name: speech_inputs} )

        self.assertTrue(all(len(_A ) == len(_A ) for x, y in zip(_A , processed_features[input_name] ) ) )

        UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_A )
        UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )

        UpperCAmelCase = processed_features[input_name]

        if len(batch_features_input.shape ) < 3:
            UpperCAmelCase = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )

    @require_torch
    def _lowercase ( self ):
        """Same as above but materializing the BatchFeature as torch tensors."""
        UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_A )
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
        UpperCAmelCase = feat_extract.model_input_names[0]

        UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )

        UpperCAmelCase = processed_features[input_name]

        if len(batch_features_input.shape ) < 3:
            UpperCAmelCase = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )

    @require_torch
    def _lowercase ( self ):
        """pad() must give (numerically) the same result for np and pt tensors."""
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
        UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
        UpperCAmelCase = feat_extract.model_input_names[0]

        UpperCAmelCase = BatchFeature({input_name: speech_inputs} )

        UpperCAmelCase = feat_extract.num_mel_bins  # hack!

        UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )[input_name]
        UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''pt''' )[input_name]

        self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )

    def _lowercase ( self ):
        """attention_mask must track per-example unpadded lengths."""
        UpperCAmelCase = self.feat_extract_dict
        UpperCAmelCase = True
        UpperCAmelCase = self.feature_extraction_class(**_A )
        UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
        UpperCAmelCase = [len(_A ) for x in speech_inputs]
        UpperCAmelCase = feat_extract.model_input_names[0]

        UpperCAmelCase = BatchFeature({input_name: speech_inputs} )

        UpperCAmelCase = feat_extract.num_mel_bins  # hack!

        UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )
        self.assertIn('''attention_mask''' , _A )
        self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
        self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _A )

    def _lowercase ( self ):
        """attention_mask with truncation to the shortest input length."""
        UpperCAmelCase = self.feat_extract_dict
        UpperCAmelCase = True
        UpperCAmelCase = self.feature_extraction_class(**_A )
        UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
        UpperCAmelCase = [len(_A ) for x in speech_inputs]
        UpperCAmelCase = feat_extract.model_input_names[0]

        UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
        UpperCAmelCase = min(_A )

        UpperCAmelCase = feat_extract.num_mel_bins  # hack!

        UpperCAmelCase = feat_extract.pad(
            _A , padding='''max_length''' , max_length=_A , truncation=_A , return_tensors='''np''' )
        self.assertIn('''attention_mask''' , _A )
        self.assertListEqual(
            list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )

    def _lowercase ( self , _A ):
        """Load `num_samples` decoded audio arrays from the dummy LibriSpeech
        dataset (originally `_load_datasamples`)."""
        from datasets import load_dataset

        UpperCAmelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        UpperCAmelCase = ds.sort('''id''' ).select(range(_A ) )[:num_samples]['''audio''']

        return [x["array"] for x in speech_samples]

    def _lowercase ( self ):
        """Integration: waveform path matches recorded reference values.
        NOTE(review): `assertEquals` is the deprecated alias of `assertEqual`."""
        UpperCAmelCase = torch.tensor(
            [2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03, 3.0518E-04, 9.1553E-05, 3.3569E-04,
             9.7656E-04, 1.8311E-03, 2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04, 4.5776E-04,
             1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03, 7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03,
             8.8501E-04, 4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] )
        # fmt: on

        UpperCAmelCase = self._load_datasamples(1 )
        UpperCAmelCase = SpeechTaFeatureExtractor()
        UpperCAmelCase = feature_extractor(_A , return_tensors='''pt''' ).input_values
        self.assertEquals(input_values.shape , (1, 9_3_6_8_0) )
        self.assertTrue(torch.allclose(input_values[0, :3_0] , _A , atol=1E-6 ) )

    def _lowercase ( self ):
        """Integration: mel-target path matches recorded reference values."""
        UpperCAmelCase = torch.tensor(
            [-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77, -3.15_20, -2.94_35,
             -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86, -3.08_64, -3.12_91, -3.23_53, -2.74_44,
             -2.68_31, -2.72_87, -3.17_61, -3.15_71, -3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] )
        # fmt: on

        UpperCAmelCase = self._load_datasamples(1 )
        UpperCAmelCase = SpeechTaFeatureExtractor()
        UpperCAmelCase = feature_extractor(audio_target=_A , return_tensors='''pt''' ).input_values
        self.assertEquals(input_values.shape , (1, 3_6_6, 8_0) )
        self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , _A , atol=1E-4 ) )
273
1
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


# NOTE(review): machine-obfuscated CamemBERT tokenizer. Every module constant
# below was renamed to `__A` (originally: logger, VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES,
# SPIECE_UNDERLINE), so each assignment rebinds the same name, and the class
# body references the original constant names — NameError as written.
__A : int = logging.get_logger(__name__)

__A : Tuple = {"vocab_file": "sentencepiece.bpe.model"}

__A : Optional[Any] = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

__A : Union[str, Any] = {
    "camembert-base": 512,
}

__A : Dict = "▁"


class A_ (a_ ):
    """CamemBERT (RoBERTa-style) sentencepiece tokenizer (originally
    `CamembertTokenizer`, base class `PreTrainedTokenizer`). Locals/attributes
    referenced below (`mask_token`, `sp_model_kwargs`, `vocab_file`,
    `fairseq_tokens_to_ids`, ...) were bound by the now-obfuscated
    `UpperCAmelCase = ...` assignments."""

    UpperCAmelCase__ = VOCAB_FILES_NAMES
    UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']

    def __init__( self , _A , _A="<s>" , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A=["<s>NOTUSED", "</s>NOTUSED"] , _A = None , **_A , ):
        """Load the sentencepiece model and set up the fairseq-compatible
        special-token id mapping.

        Obfuscated parameters (in order): vocab_file, bos/eos/sep/cls/unk/pad/
        mask tokens, additional_special_tokens, sp_model_kwargs.
        NOTE(review): mutable default `["<s>NOTUSED", "</s>NOTUSED"]` is shared
        across calls — harmless only while never mutated.
        """
        # Mask token behaves like a normal word, i.e. includes the space before it.
        UpperCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token

        UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=_A ,
            eos_token=_A ,
            unk_token=_A ,
            sep_token=_A ,
            cls_token=_A ,
            pad_token=_A ,
            mask_token=_A ,
            additional_special_tokens=_A ,
            sp_model_kwargs=self.sp_model_kwargs ,
            **_A ,
        )
        UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(_A ) )
        UpperCAmelCase = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        UpperCAmelCase = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3}
        UpperCAmelCase = len(self.fairseq_tokens_to_ids )
        UpperCAmelCase = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
        UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def _lowercase ( self , _A , _A = None ):
        """Build model inputs with special tokens: `<s> A </s>` or
        `<s> A </s></s> B </s>` (originally `build_inputs_with_special_tokens`)."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        UpperCAmelCase = [self.cls_token_id]
        UpperCAmelCase = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def _lowercase ( self , _A , _A = None , _A = False ):
        """Return a 0/1 mask marking special tokens (originally
        `get_special_tokens_mask`)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )

        if token_ids_a is None:
            return [1] + ([0] * len(_A )) + [1]
        return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]

    def _lowercase ( self , _A , _A = None ):
        """Return an all-zeros token-type-id list — CamemBERT, like RoBERTa,
        does not use segment ids (originally `create_token_type_ids_from_sequences`)."""
        UpperCAmelCase = [self.sep_token_id]
        UpperCAmelCase = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def _lowercase ( self ):
        """vocab_size: sentencepiece vocab plus the 4 fairseq special ids."""
        return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )

    def _lowercase ( self ):
        """Return the full token->id vocab dict, including added tokens
        (originally `get_vocab`)."""
        UpperCAmelCase = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def _lowercase ( self , _A ):
        """Tokenize text with the sentencepiece model (originally `_tokenize`)."""
        return self.sp_model.encode(_A , out_type=_A )

    def _lowercase ( self , _A ):
        """Token -> id, routing fairseq specials and mapping sentencepiece's
        unk (piece id 0) to the fairseq unk (originally `_convert_token_to_id`)."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(_A ) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(_A )

    def _lowercase ( self , _A ):
        """Id -> token, undoing the fairseq offset (originally
        `_convert_id_to_token`)."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def _lowercase ( self , _A ):
        """Join tokens back into a string, decoding non-special runs with the
        sentencepiece model (originally `convert_tokens_to_string`)."""
        UpperCAmelCase = []
        UpperCAmelCase = ''''''
        UpperCAmelCase = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(_A ) + token
                UpperCAmelCase = True
                UpperCAmelCase = []
            else:
                current_sub_tokens.append(_A )
                UpperCAmelCase = False
        out_string += self.sp_model.decode(_A )
        return out_string.strip()

    def __getstate__( self ):
        """Drop the unpicklable sentencepiece processor before pickling."""
        UpperCAmelCase = self.__dict__.copy()
        UpperCAmelCase = None
        return state

    def __setstate__( self , _A ):
        """Restore state and reload the sentencepiece model from vocab_file."""
        UpperCAmelCase = d

        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            UpperCAmelCase = {}

        UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _lowercase ( self , _A , _A = None ):
        """Copy (or serialize) the sentencepiece model into `save_directory`
        and return the written path (originally `save_vocabulary`)."""
        if not os.path.isdir(_A ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        UpperCAmelCase = os.path.join(
            _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , _A )
        elif not os.path.isfile(self.vocab_file ):
            with open(_A , '''wb''' ) as fi:
                UpperCAmelCase = self.sp_model.serialized_model_proto()
                fi.write(_A )

        return (out_vocab_file,)
273
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


# NOTE(review): machine-obfuscated lazy-import __init__ for Speech2Text. The
# obfuscation collapsed every `_import_structure[...] = [...]` subscript
# assignment into a rebinding of `__A`, so only the last binding survives, and
# the final `_LazyModule(...)` call references `_import_structure`, which is
# never defined here — NameError as written.
__A : Union[str, Any] = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

# Tokenizer is only importable when sentencepiece is installed.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : List[Any] = ["Speech2TextTokenizer"]

# Feature extractor requires the speech extras (torchaudio).
try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : List[Any] = ["Speech2TextFeatureExtractor"]

# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : int = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : Tuple = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]


# Under static type checking, perform the real (eager) imports so tools can
# resolve the names; at runtime the module is replaced by a _LazyModule.
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
    from .processing_speech_to_text import SpeechaTextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import SpeechaTextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeechaTextForConditionalGeneration,
            TFSpeechaTextModel,
            TFSpeechaTextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechaTextForConditionalGeneration,
            SpeechaTextModel,
            SpeechaTextPreTrainedModel,
        )

else:
    import sys

    # Originally `sys.modules[__name__] = _LazyModule(...)`; see review note
    # above about the undefined `_import_structure`.
    __A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
273
1
from __future__ import annotations import math def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int: '''simple docstring''' if depth < 0: raise ValueError('''Depth cannot be less than 0''' ) if not scores: raise ValueError('''Scores cannot be empty''' ) if depth == height: return scores[node_index] return ( max( minimax(depth + 1 , node_index * 2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , ) if is_max else min( minimax(depth + 1 , node_index * 2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , ) ) def __SCREAMING_SNAKE_CASE ( ) -> None: '''simple docstring''' UpperCAmelCase = [90, 23, 6, 33, 21, 65, 123, 3_4423] UpperCAmelCase = math.log(len(UpperCamelCase__ ) , 2 ) print(F"""Optimal value : {minimax(0 , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )}""" ) if __name__ == "__main__": import doctest doctest.testmod() main()
273
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first ``length`` hexagonal numbers, h(n) = n * (2n - 1).

    :param length: number of terms to generate (must be a positive integer)
    :return: [0, 1, 6, 15, 28, ...] of the requested length
    :raises ValueError: if ``length`` is not a positive integer

    Fixes vs. original: the function was defined as ``__SCREAMING_SNAKE_CASE``
    with parameter ``UpperCamelCase__`` while the body and the ``__main__``
    block used ``length``/``hexagonal_numbers`` (NameError), and the
    validation read ``isinstance(x, x)`` instead of ``isinstance(length, int)``.
    """
    # Check the type first so a non-int (e.g. a str) raises the documented
    # ValueError rather than a TypeError from the `<=` comparison.
    if not isinstance(length, int) or length <= 0:
        raise ValueError('''Length must be a positive integer.''')
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
273
1
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union

import datasets
import numpy as np
import torch
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

# NOTE(review): obfuscated `run_swag` multiple-choice fine-tuning example.
# `__A` was originally `logger`.
__A : Optional[int] = logging.getLogger(__name__)


@dataclass
class A_ :
    """Arguments for selecting the pretrained model/config/tokenizer
    (originally `ModelArguments`).

    NOTE(review): every field was renamed to the same placeholder
    `UpperCAmelCase__` and its type annotation was stripped, so as written
    only the last assignment survives and — lacking annotations — none of
    these are actual dataclass fields. Original field names can be read from
    the help strings (model_name_or_path, config_name, tokenizer_name,
    cache_dir, use_fast_tokenizer, model_revision, use_auth_token).
    """

    UpperCAmelCase__ = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    UpperCAmelCase__ = field(
        default=a_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    UpperCAmelCase__ = field(
        default=a_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    UpperCAmelCase__ = field(
        default=a_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    UpperCAmelCase__ = field(
        default=a_ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
    UpperCAmelCase__ = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    UpperCAmelCase__ = field(
        default=a_ , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )


@dataclass
class A_ :
    """Arguments describing the training/evaluation data and preprocessing
    (originally `DataTrainingArguments`). Same obfuscation caveat as above;
    original fields: train_file, validation_file, overwrite_cache,
    preprocessing_num_workers, max_seq_length, pad_to_max_length,
    max_train_samples, max_eval_samples."""

    UpperCAmelCase__ = field(default=a_ , metadata={'''help''': '''The input training data file (a text file).'''} )
    UpperCAmelCase__ = field(
        default=a_ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
    UpperCAmelCase__ = field(
        default=a_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    UpperCAmelCase__ = field(
        default=a_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
    UpperCAmelCase__ = field(
        default=a_ , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. If passed, sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    UpperCAmelCase__ = field(
        default=a_ , metadata={
            '''help''': (
                '''Whether to pad all samples to the maximum sentence length. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
                '''efficient on GPU but very bad for TPU.'''
            )
        } , )
    UpperCAmelCase__ = field(
        default=a_ , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    UpperCAmelCase__ = field(
        default=a_ , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )

    def _lowercase ( self ):
        """Validate that the provided data files are csv or json
        (originally `__post_init__`)."""
        if self.train_file is not None:
            UpperCAmelCase = self.train_file.split('''.''' )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            UpperCAmelCase = self.validation_file.split('''.''' )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass class A_ : UpperCAmelCase__ = 42 UpperCAmelCase__ = True UpperCAmelCase__ = None UpperCAmelCase__ = None def __call__( self , _A ): '''simple docstring''' UpperCAmelCase = '''label''' if '''label''' in features[0].keys() else '''labels''' UpperCAmelCase = [feature.pop(_A ) for feature in features] UpperCAmelCase = len(_A ) UpperCAmelCase = len(features[0]['''input_ids'''] ) UpperCAmelCase = [ [{k: v[i] for k, v in feature.items()} for i in range(_A )] for feature in features ] UpperCAmelCase = list(chain(*_A ) ) UpperCAmelCase = self.tokenizer.pad( _A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , ) # Un-flatten UpperCAmelCase = {k: v.view(_A , _A , -1 ) for k, v in batch.items()} # Add back labels UpperCAmelCase = torch.tensor(_A , dtype=torch.intaa ) return batch def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]: '''simple docstring''' UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. 
send_example_telemetry('''run_swag''' , UpperCamelCase__ , UpperCamelCase__ ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(UpperCamelCase__ ) datasets.utils.logging.set_verbosity(UpperCamelCase__ ) transformers.utils.logging.set_verbosity(UpperCamelCase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: UpperCAmelCase = {} if data_args.train_file is not None: UpperCAmelCase = data_args.train_file if data_args.validation_file is not None: UpperCAmelCase = data_args.validation_file UpperCAmelCase = data_args.train_file.split('''.''' )[-1] UpperCAmelCase = load_dataset( UpperCamelCase__ , data_files=UpperCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. UpperCAmelCase = load_dataset( '''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) UpperCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) UpperCAmelCase = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. UpperCAmelCase = [F"""ending{i}""" for i in range(4 )] UpperCAmelCase = '''sent1''' UpperCAmelCase = '''sent2''' if data_args.max_seq_length is None: UpperCAmelCase = tokenizer.model_max_length if max_seq_length > 1024: logger.warning( '''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value''' ''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can''' ''' override this default with `--block_size xxx`.''' ) UpperCAmelCase = 1024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) UpperCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. 
def preprocess_function(UpperCamelCase__ ): UpperCAmelCase = [[context] * 4 for context in examples[context_name]] UpperCAmelCase = examples[question_header_name] UpperCAmelCase = [ [F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(UpperCamelCase__ ) ] # Flatten out UpperCAmelCase = list(chain(*UpperCamelCase__ ) ) UpperCAmelCase = list(chain(*UpperCamelCase__ ) ) # Tokenize UpperCAmelCase = tokenizer( UpperCamelCase__ , UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(UpperCamelCase__ ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError('''--do_train requires a train dataset''' ) UpperCAmelCase = raw_datasets['''train'''] if data_args.max_train_samples is not None: UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_train_samples ) UpperCAmelCase = train_dataset.select(range(UpperCamelCase__ ) ) with training_args.main_process_first(desc='''train dataset map pre-processing''' ): UpperCAmelCase = train_dataset.map( UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError('''--do_eval requires a validation dataset''' ) UpperCAmelCase = raw_datasets['''validation'''] if data_args.max_eval_samples is not None: UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_eval_samples ) UpperCAmelCase = eval_dataset.select(range(UpperCamelCase__ ) ) with training_args.main_process_first(desc='''validation dataset map pre-processing''' ): UpperCAmelCase = eval_dataset.map( UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data 
collator UpperCAmelCase = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=UpperCamelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(UpperCamelCase__ ): UpperCAmelCase , UpperCAmelCase = eval_predictions UpperCAmelCase = np.argmax(UpperCamelCase__ , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer UpperCAmelCase = Trainer( model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , ) # Training if training_args.do_train: UpperCAmelCase = None if training_args.resume_from_checkpoint is not None: UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCAmelCase = last_checkpoint UpperCAmelCase = trainer.train(resume_from_checkpoint=UpperCamelCase__ ) trainer.save_model() # Saves the tokenizer too for easy upload UpperCAmelCase = train_result.metrics UpperCAmelCase = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ ) ) UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) ) trainer.log_metrics('''train''' , UpperCamelCase__ ) trainer.save_metrics('''train''' , UpperCamelCase__ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) UpperCAmelCase = trainer.evaluate() UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ ) UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) ) trainer.log_metrics('''eval''' , UpperCamelCase__ ) trainer.save_metrics('''eval''' , UpperCamelCase__ ) UpperCAmelCase = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''multiple-choice''', 
'''dataset_tags''': '''swag''', '''dataset_args''': '''regular''', '''dataset''': '''SWAG''', '''language''': '''en''', } if training_args.push_to_hub: trainer.push_to_hub(**UpperCamelCase__ ) else: trainer.create_model_card(**UpperCamelCase__ ) def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> int: '''simple docstring''' main() if __name__ == "__main__": main()
273
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , ) @pytest.mark.usefixtures('''sm_env''' ) @parameterized_class( [ { '''framework''': '''pytorch''', '''script''': '''run_glue.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.g4dn.xlarge''', '''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9}, }, { '''framework''': '''tensorflow''', '''script''': '''run_tf.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.g4dn.xlarge''', '''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9}, }, ] ) class A_ (unittest.TestCase ): def _lowercase ( self ): '''simple docstring''' if self.framework == "pytorch": subprocess.run( F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , ) assert hasattr(self , '''env''' ) def _lowercase ( self , _A=1 ): '''simple docstring''' return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , ) def _lowercase ( self , _A ): '''simple docstring''' TrainingJobAnalytics(_A 
).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.create_estimator() # run training estimator.fit() # result dataframe UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping UpperCAmelCase = ( Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert all(t <= self.results['''eval_loss'''] for t in eval_loss ) # dump tests result into json file to share in PR with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
273
1
import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Any: # picklable for multiprocessing '''simple docstring''' return x.sum() def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Optional[Any]: # picklable for multiprocessing '''simple docstring''' return i + 1 @dataclass class A_ : UpperCAmelCase__ = 42 UpperCAmelCase__ = 42 class A_ (a_ ): def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = {} UpperCAmelCase = [] UpperCAmelCase = 1 UpperCAmelCase = [1, 2] UpperCAmelCase = {'''a''': 1, '''b''': 2} UpperCAmelCase = {'''a''': [1, 2], '''b''': [3, 4]} UpperCAmelCase = {'''a''': {'''1''': 1}, '''b''': 2} UpperCAmelCase = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4} UpperCAmelCase = {} UpperCAmelCase = [] UpperCAmelCase = 2 UpperCAmelCase = [2, 3] UpperCAmelCase = {'''a''': 2, '''b''': 3} UpperCAmelCase = {'''a''': [2, 3], '''b''': [4, 5]} UpperCAmelCase = {'''a''': {'''1''': 2}, '''b''': 3} UpperCAmelCase = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5} self.assertEqual(map_nested(_A , _A ) , _A ) self.assertEqual(map_nested(_A , _A ) , _A ) self.assertEqual(map_nested(_A , _A ) , _A ) self.assertEqual(map_nested(_A , _A ) , _A ) self.assertEqual(map_nested(_A , _A ) , _A ) self.assertEqual(map_nested(_A , _A ) , _A ) self.assertEqual(map_nested(_A , _A ) , _A ) self.assertEqual(map_nested(_A , _A ) , _A ) UpperCAmelCase = 2 self.assertEqual(map_nested(_A , _A , num_proc=_A ) , _A ) self.assertEqual(map_nested(_A , _A , num_proc=_A ) , _A ) self.assertEqual(map_nested(_A , _A , num_proc=_A ) , _A ) self.assertEqual(map_nested(_A , _A , num_proc=_A ) , _A ) 
self.assertEqual(map_nested(_A , _A , num_proc=_A ) , _A ) self.assertEqual(map_nested(_A , _A , num_proc=_A ) , _A ) self.assertEqual(map_nested(_A , _A , num_proc=_A ) , _A ) self.assertEqual(map_nested(_A , _A , num_proc=_A ) , _A ) UpperCAmelCase = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )} UpperCAmelCase = {'''a''': 2, '''b''': 0, '''c''': 2} UpperCAmelCase = { '''a''': np.eye(2 ).astype(_A ), '''b''': np.zeros(3 ).astype(_A ), '''c''': np.ones(2 ).astype(_A ), } self.assertEqual(map_nested(_A , _A , map_numpy=_A ) , _A ) self.assertEqual( {k: v.tolist() for k, v in map_nested(_A , _A , map_numpy=_A ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(_A , _A , map_numpy=_A , num_proc=_A ) , _A ) self.assertEqual( {k: v.tolist() for k, v in map_nested(_A , _A , map_numpy=_A , num_proc=_A ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(_A ): # can't pickle a local lambda map_nested(lambda _A : x + 1 , _A , num_proc=_A ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = {'''a''': 1, '''b''': 2} UpperCAmelCase = {'''a''': 3, '''b''': 4} UpperCAmelCase = {'''a''': 5, '''b''': 6} UpperCAmelCase = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(_A , _A , _A ) ) , _A ) def _lowercase ( self ): '''simple docstring''' class A_ : UpperCAmelCase__ = '''bar''' UpperCAmelCase = Foo() self.assertEqual(foo.my_attr , '''bar''' ) with temporary_assignment(_A , '''my_attr''' , '''BAR''' ): self.assertEqual(foo.my_attr , '''BAR''' ) self.assertEqual(foo.my_attr , '''bar''' ) @pytest.mark.parametrize( '''iterable_length, num_proc, expected_num_proc''' , [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ] , ) def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple: '''simple 
docstring''' with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch( '''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool: UpperCAmelCase = {F"""{i}""": i for i in range(UpperCamelCase__ )} UpperCAmelCase = map_nested(lambda UpperCamelCase__ : x + 10 , UpperCamelCase__ , num_proc=UpperCamelCase__ , parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class A_ (a_ ): @require_tf def _lowercase ( self ): '''simple docstring''' import tensorflow as tf from tensorflow.keras import layers UpperCAmelCase = layers.Dense(2 ) def gen_random_output(): UpperCAmelCase = tf.random.uniform((1, 3) ) return model(_A ).numpy() with temp_seed(4_2 , set_tensorflow=_A ): UpperCAmelCase = gen_random_output() with temp_seed(4_2 , set_tensorflow=_A ): UpperCAmelCase = gen_random_output() UpperCAmelCase = gen_random_output() np.testing.assert_equal(_A , _A ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def _lowercase ( self ): '''simple docstring''' import torch def gen_random_output(): UpperCAmelCase = torch.nn.Linear(3 , 2 ) UpperCAmelCase = torch.rand(1 , 3 ) return model(_A ).detach().numpy() with temp_seed(4_2 , set_pytorch=_A ): UpperCAmelCase = gen_random_output() with temp_seed(4_2 , set_pytorch=_A ): UpperCAmelCase = gen_random_output() UpperCAmelCase = gen_random_output() np.testing.assert_equal(_A , _A ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def _lowercase ( self ): '''simple docstring''' def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(4_2 ): UpperCAmelCase = gen_random_output() with temp_seed(4_2 ): UpperCAmelCase = gen_random_output() UpperCAmelCase = gen_random_output() np.testing.assert_equal(_A , _A ) 
self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize('''input_data''' , [{}] ) def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> List[str]: '''simple docstring''' UpperCAmelCase = NestedDataStructure(UpperCamelCase__ ).data assert output_data == input_data @pytest.mark.parametrize( '''data, expected_output''' , [ ({}, []), ([], []), ('''foo''', ['''foo''']), (['''foo''', '''bar'''], ['''foo''', '''bar''']), ([['''foo''', '''bar''']], ['''foo''', '''bar''']), ([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']), ([[['''foo'''], '''bar''']], ['''foo''', '''bar''']), ({'''a''': 1, '''b''': 2}, [1, 2]), ({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]), ({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]), ({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]), ({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]), ({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]), ({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]), ] , ) def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]: '''simple docstring''' UpperCAmelCase = NestedDataStructure(UpperCamelCase__ ).flatten() assert output == expected_output def __SCREAMING_SNAKE_CASE ( ) -> str: '''simple docstring''' UpperCAmelCase = A(x=1 , y='''foobar''' ) UpperCAmelCase = {'''x''': 1, '''y''': '''foobar'''} assert asdict(UpperCamelCase__ ) == expected_output UpperCAmelCase = {'''a''': {'''b''': A(x=10 , y='''foo''' )}, '''c''': [A(x=20 , y='''bar''' )]} UpperCAmelCase = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]} assert asdict(UpperCamelCase__ ) == expected_output with pytest.raises(UpperCamelCase__ ): asdict([1, A(x=10 , y='''foo''' )] ) def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> 
Union[str, Any]: '''simple docstring''' return text.split() def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Dict: '''simple docstring''' yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def __SCREAMING_SNAKE_CASE ( ) -> Tuple: '''simple docstring''' with Pool(2 ) as pool: UpperCAmelCase = list(iflatmap_unordered(UpperCamelCase__ , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) ) assert out.count('''hello''' ) == 10 assert out.count('''there''' ) == 10 assert len(UpperCamelCase__ ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: UpperCAmelCase = list(iflatmap_unordered(UpperCamelCase__ , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) ) assert out.count('''hello''' ) == 10 assert out.count('''there''' ) == 10 assert len(UpperCamelCase__ ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: UpperCAmelCase = [] for yield_time, content in iflatmap_unordered( UpperCamelCase__ , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(UpperCamelCase__ ) assert out.count('''a''' ) == 2 assert out.count('''b''' ) == 2 assert len(UpperCamelCase__ ) == 4
273
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A : int = logging.get_logger(__name__) __A : Tuple = { "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json", "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json", "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json", # See all BigBird models at https://huggingface.co/models?filter=big_bird } class A_ (a_ ): UpperCAmelCase__ = '''big_bird''' def __init__( self , _A=5_0_3_5_8 , _A=7_6_8 , _A=1_2 , _A=1_2 , _A=3_0_7_2 , _A="gelu_new" , _A=0.1 , _A=0.1 , _A=4_0_9_6 , _A=2 , _A=0.02 , _A=1E-12 , _A=True , _A=0 , _A=1 , _A=2 , _A=6_6 , _A="block_sparse" , _A=True , _A=False , _A=6_4 , _A=3 , _A=None , **_A , ): '''simple docstring''' super().__init__( pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , sep_token_id=_A , **_A , ) UpperCAmelCase = vocab_size UpperCAmelCase = max_position_embeddings UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = initializer_range UpperCAmelCase = type_vocab_size UpperCAmelCase = layer_norm_eps UpperCAmelCase = use_cache UpperCAmelCase = rescale_embeddings UpperCAmelCase = attention_type UpperCAmelCase = use_bias UpperCAmelCase = block_size UpperCAmelCase = num_random_blocks UpperCAmelCase = classifier_dropout class A_ (a_ ): @property def _lowercase ( self ): '''simple docstring''' if self.task == "multiple-choice": UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: UpperCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ 
('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
273
1
import unittest import numpy as np from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING from transformers.pipelines import AudioClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torchaudio, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class lowercase_ ( unittest.TestCase ): '''simple docstring''' __snake_case = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING __snake_case = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] ) ->List[str]: """simple docstring""" a = AudioClassificationPipeline(model=__UpperCAmelCase , feature_extractor=__UpperCAmelCase ) # test with a raw waveform a = np.zeros((34_000,) ) a = np.zeros((14_000,) ) return audio_classifier, [audioa, audio] def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict ) ->Any: """simple docstring""" a , a = examples a = audio_classifier(__UpperCAmelCase ) # by default a model is initialized with num_labels=2 self.assertEqual( __UpperCAmelCase , [ {'''score''': ANY(__UpperCAmelCase ), '''label''': ANY(__UpperCAmelCase )}, {'''score''': ANY(__UpperCAmelCase ), '''label''': ANY(__UpperCAmelCase )}, ] , ) a = audio_classifier(__UpperCAmelCase , top_k=1 ) self.assertEqual( __UpperCAmelCase , [ {'''score''': ANY(__UpperCAmelCase ), '''label''': ANY(__UpperCAmelCase )}, ] , ) self.run_torchaudio(__UpperCAmelCase ) @require_torchaudio def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : Dict ) ->Dict: """simple docstring""" import datasets # test with a local file a = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) a = dataset[0]['''audio''']['''array'''] a = audio_classifier(__UpperCAmelCase ) 
self.assertEqual( __UpperCAmelCase , [ {'''score''': ANY(__UpperCAmelCase ), '''label''': ANY(__UpperCAmelCase )}, {'''score''': ANY(__UpperCAmelCase ), '''label''': ANY(__UpperCAmelCase )}, ] , ) @require_torch def __lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" a = '''anton-l/wav2vec2-random-tiny-classifier''' a = pipeline('''audio-classification''' , model=__UpperCAmelCase ) a = np.ones((8_000,) ) a = audio_classifier(__UpperCAmelCase , top_k=4 ) a = [ {'''score''': 0.0842, '''label''': '''no'''}, {'''score''': 0.0838, '''label''': '''up'''}, {'''score''': 0.0837, '''label''': '''go'''}, {'''score''': 0.0834, '''label''': '''right'''}, ] a = [ {'''score''': 0.0845, '''label''': '''stop'''}, {'''score''': 0.0844, '''label''': '''on'''}, {'''score''': 0.0841, '''label''': '''right'''}, {'''score''': 0.0834, '''label''': '''left'''}, ] self.assertIn(nested_simplify(__UpperCAmelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) a = {'''array''': np.ones((8_000,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate} a = audio_classifier(__UpperCAmelCase , top_k=4 ) self.assertIn(nested_simplify(__UpperCAmelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) @require_torch @slow def __lowerCAmelCase ( self : Tuple ) ->Dict: """simple docstring""" import datasets a = '''superb/wav2vec2-base-superb-ks''' a = pipeline('''audio-classification''' , model=__UpperCAmelCase ) a = datasets.load_dataset('''anton-l/superb_dummy''' , '''ks''' , split='''test''' ) a = np.array(dataset[3]['''speech'''] , dtype=np.floataa ) a = audio_classifier(__UpperCAmelCase , top_k=4 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=3 ) , [ {'''score''': 0.981, '''label''': '''go'''}, {'''score''': 0.007, '''label''': '''up'''}, {'''score''': 0.006, '''label''': '''_unknown_'''}, {'''score''': 0.001, '''label''': '''down'''}, ] , ) @require_tf @unittest.skip('''Audio classification is not implemented for TF''' ) def 
__lowerCAmelCase ( self : List[str] ) ->str: """simple docstring""" pass
0
from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class A_ : def __init__( self , _A , _A=1_3 , _A=3_0 , _A=2 , _A=3 , _A=True , _A=True , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1_0 , _A=0.02 , _A=3 , _A=None , ): '''simple docstring''' UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = image_size UpperCAmelCase = patch_size UpperCAmelCase = num_channels UpperCAmelCase = is_training UpperCAmelCase = use_labels UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = type_sequence_label_size UpperCAmelCase = initializer_range UpperCAmelCase = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase = (image_size // patch_size) ** 2 UpperCAmelCase = num_patches + 1 def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase = None if self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase = self.get_config() return config, pixel_values, labels def _lowercase ( self ): '''simple docstring''' return ViTConfig( 
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , ) def _lowercase ( self , _A , _A , _A ): '''simple docstring''' UpperCAmelCase = TFViTModel(config=_A ) UpperCAmelCase = model(_A , training=_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. UpperCAmelCase = self.image_size // 2 UpperCAmelCase = pixel_values[:, :, :image_size, :image_size] UpperCAmelCase = model(_A , interpolate_pos_encoding=_A , training=_A ) UpperCAmelCase = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def _lowercase ( self , _A , _A , _A ): '''simple docstring''' UpperCAmelCase = self.type_sequence_label_size UpperCAmelCase = TFViTForImageClassification(_A ) UpperCAmelCase = model(_A , labels=_A , training=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. 
UpperCAmelCase = self.image_size // 2 UpperCAmelCase = pixel_values[:, :, :image_size, :image_size] UpperCAmelCase = model(_A , interpolate_pos_encoding=_A , training=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase = 1 UpperCAmelCase = TFViTForImageClassification(_A ) UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs UpperCAmelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class A_ (a_ , a_ , unittest.TestCase ): UpperCAmelCase__ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () UpperCAmelCase__ = ( {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification} if is_tf_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = TFViTModelTester(self ) UpperCAmelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 ) def _lowercase ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def _lowercase ( self ): '''simple docstring''' pass @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def _lowercase ( self ): '''simple docstring''' pass def _lowercase ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase = model_class(_A ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) UpperCAmelCase = 
model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase = model_class(_A ) UpperCAmelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase = [*signature.parameters.keys()] UpperCAmelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _A ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) @slow def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(_A ) def __SCREAMING_SNAKE_CASE ( ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class A_ (unittest.TestCase ): @cached_property def _lowercase ( self ): '''simple docstring''' return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None @slow def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ) UpperCAmelCase = self.default_image_processor UpperCAmelCase = prepare_img() UpperCAmelCase = image_processor(images=_A , return_tensors='''tf''' ) # forward pass UpperCAmelCase = model(**_A ) # verify the logits UpperCAmelCase = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , _A ) UpperCAmelCase = tf.constant([-0.27_44, 0.82_15, -0.08_36] ) 
tf.debugging.assert_near(outputs.logits[0, :3] , _A , atol=1E-4 )
273
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE_: Optional[Any] ={ 'configuration_x_clip': [ 'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XCLIPConfig', 'XCLIPTextConfig', 'XCLIPVisionConfig', ], 'processing_x_clip': ['XCLIPProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_: str =[ 'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'XCLIPModel', 'XCLIPPreTrainedModel', 'XCLIPTextModel', 'XCLIPVisionModel', ] if TYPE_CHECKING: from .configuration_x_clip import ( XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, XCLIPConfig, XCLIPTextConfig, XCLIPVisionConfig, ) from .processing_x_clip import XCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_x_clip import ( XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, XCLIPModel, XCLIPPreTrainedModel, XCLIPTextModel, XCLIPVisionModel, ) else: import sys SCREAMING_SNAKE_CASE_: List[Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
1
"""Slow integration tests for XLM-RoBERTa against reference fairseq activations.

Fix: every local in the mangled original was named ``UpperCAmelCase``, so
``model``/``output`` were undefined at the points they were used. Names
restored; numeric reference values and tolerances unchanged.
"""
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        """Base model: check last-hidden-state shape and a slice of its values."""
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # Reference values were produced with:
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        """Large model: same check with 1024-dim hidden states."""
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # Reference values were produced with:
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
273
0
"""Fast (tokenizers-backed) tokenizer for the BARThez model.

Fix: the mangled original turned class attributes and ``self.*`` assignments
into dead local assignments (``lowercase__ = ...``), so ``self.vocab_file`` and
``self.can_save_slow_tokenizer`` were never set and ``__init__`` ignored all of
its arguments. Restored; URL maps, special tokens and runtime strings unchanged.
"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    # Slow tokenizer is unavailable without sentencepiece installed.
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" BARThez tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # The slow vocabulary can only be persisted if the sentencepiece model file was provided.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Single sequence: ``<s> X </s>``; pair: ``<s> A </s></s> B </s>``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BARThez does not use token type ids; return a zero mask of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into ``save_directory``; raises if unavailable."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
2
"""Imports and argument dataclasses for the SWAG multiple-choice fine-tuning example.

Fix: field names were erased by mangling (every field was ``UpperCAmelCase__``),
defaults pointed at an undefined ``a_``, and the dataclass validation hook lost
its ``__post_init__`` name. Restored; help strings unchanged.
"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union

import datasets
import numpy as np
import torch
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Validate file extensions right after dataclass initialisation.
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """Dynamically pad multiple-choice features: flatten (batch, choices, ...)
    so ``tokenizer.pad`` can handle it, then restore the choice dimension.

    Fix: the mangled original collapsed every local (and the ``tokenizer``
    field) into one name and used the digit-mangled ``torch.intaa`` dtype.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch


def main():
    """Entry point: parse args, load data/model, preprocess, train and evaluate."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )

    # Load pretrained model and tokenizer. In distributed training, the
    # .from_pretrained methods guarantee that only one local process downloads.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        # Repeat each context for its four candidate endings.
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten back into groups of 4 choices per example.
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs): each spawned process just runs main().
    # Fix: the original guard called an undefined ``main`` (the function had
    # been mangled to ``a_``); names restored.
    main()


if __name__ == "__main__":
    main()
273
0
"""Lazy-import initialiser for the BigBird-Pegasus model.

Fix: same defect as other mangled ``__init__`` files — the import structure and
torch-gated model list were assigned to throwaway names while the undefined
``_import_structure`` was passed to ``_LazyModule``. Restored to the standard
transformers lazy-module pattern.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> list of public names it exports; consumed by _LazyModule.
_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

# Modeling code is only importable when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
3
from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class A_ : UpperCAmelCase__ = MBartConfig UpperCAmelCase__ = {} UpperCAmelCase__ = '''gelu''' def __init__( self , _A , _A=1_3 , _A=7 , _A=True , _A=False , _A=9_9 , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A=0.1 , _A=0.1 , _A=2_0 , _A=2 , _A=1 , _A=0 , ): '''simple docstring''' UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = seq_length UpperCAmelCase = is_training UpperCAmelCase = use_labels UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = max_position_embeddings UpperCAmelCase = eos_token_id UpperCAmelCase = pad_token_id UpperCAmelCase = bos_token_id def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , 
decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) UpperCAmelCase = prepare_mbart_inputs_dict(_A , _A , _A ) return config, inputs_dict def _lowercase ( self , _A , _A ): '''simple docstring''' UpperCAmelCase = TFMBartModel(config=_A ).get_decoder() UpperCAmelCase = inputs_dict['''input_ids'''] UpperCAmelCase = input_ids[:1, :] UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :] UpperCAmelCase = inputs_dict['''head_mask'''] UpperCAmelCase = 1 # first forward pass UpperCAmelCase = model(_A , attention_mask=_A , head_mask=_A , use_cache=_A ) UpperCAmelCase , UpperCAmelCase = outputs.to_tuple() UpperCAmelCase = past_key_values[1] def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , ) -> List[str]: '''simple docstring''' if attention_mask is None: UpperCAmelCase = tf.cast(tf.math.not_equal(UpperCamelCase__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCAmelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, 
"decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class A_ (a_ , a_ , unittest.TestCase ): UpperCAmelCase__ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () UpperCAmelCase__ = (TFMBartForConditionalGeneration,) if is_tf_available() else () UpperCAmelCase__ = ( { '''conversational''': TFMBartForConditionalGeneration, '''feature-extraction''': TFMBartModel, '''summarization''': TFMBartForConditionalGeneration, '''text2text-generation''': TFMBartForConditionalGeneration, '''translation''': TFMBartForConditionalGeneration, } if is_tf_available() else {} ) UpperCAmelCase__ = True UpperCAmelCase__ = False UpperCAmelCase__ = False def _lowercase ( self , _A , _A , _A , _A , _A ): '''simple docstring''' if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = TFMBartModelTester(self ) UpperCAmelCase = ConfigTester(self , config_class=_A ) def _lowercase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_A ) @require_sentencepiece @require_tokenizers @require_tf class A_ (unittest.TestCase ): UpperCAmelCase__ = [ ''' UN Chief Says There Is No Military Solution in Syria''', ] UpperCAmelCase__ = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', ] UpperCAmelCase__ = '''facebook/mbart-large-en-ro''' @cached_property def _lowercase ( self ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def _lowercase ( self , **_A ): '''simple docstring''' UpperCAmelCase = self.translate_src_text(**_A ) self.assertListEqual(self.expected_text , _A ) def _lowercase ( self , **_A ): '''simple docstring''' UpperCAmelCase = self.tokenizer(self.src_text , **_A , return_tensors='''tf''' ) UpperCAmelCase = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) UpperCAmelCase = self.tokenizer.batch_decode(_A , skip_special_tokens=_A ) return generated_words @slow def _lowercase ( self ): '''simple docstring''' self._assert_generated_batch_equal_expected()
273
0
"""Pure-Python implementation of the SHA-256 hash function (FIPS 180-4).

Fixes over the mangled original: the compression loop collapsed ``temp1``/
``temp2`` and the working variables ``a``..``h`` into single shared names
(making the computation wrong), and the ``__main__`` guard called an undefined
``main`` (the function had been renamed ``a_``).
"""
import argparse
import struct
import unittest


class SHA256:
    """Compute the SHA-256 digest of ``data``; hex result is in ``self.hash``.

    >>> SHA256(b"").hash
    'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
    """

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initial hash values: first 32 bits of the fractional parts of the
        # square roots of the first 8 primes (FIPS 180-4, section 5.3.3).
        self.hashes = [
            0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
            0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
        ]
        # Round constants: first 32 bits of the fractional parts of the cube
        # roots of the first 64 primes (FIPS 180-4, section 4.2.2).
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
            0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
            0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
            0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
            0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
            0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
            0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
            0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
            0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad to a multiple of 64 bytes: 0x80, zero fill, 64-bit big-endian bit length."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        """Run the compression function over every 64-byte block, updating self.hashes."""
        # Convert into blocks of 64 bytes.
        self.blocks = [
            self.preprocessed_data[x : x + 64] for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the block into 16 four-byte words, then extend the
            # message schedule with 48 zero-ed entries to be filled below.
            words = list(struct.unpack(">16L", block)) + [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(64):
                if index > 15:
                    # Message-schedule expansion: W[i] = W[i-16] + s0 + W[i-7] + s1.
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (words[index - 16] + s0 + words[index - 7] + s1) % 0x100000000
                # Compression round (all arithmetic modulo 2**32).
                big_s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (h + big_s1 + ch + self.round_constants[index] + words[index]) % 0x100000000
                big_s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (big_s0 + maj) % 0x100000000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    (d + temp1) % 0x100000000,
                    c,
                    b,
                    a,
                    (temp1 + temp2) % 0x100000000,
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Fold the compressed chunk into the running hash values.
            self.hashes = [
                (element + mutated_hash_values[index]) % 0x100000000
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join(hex(value)[2:].zfill(8) for value in self.hashes)

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit value by ``rotations`` bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    """Cross-check against hashlib's C implementation."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    """CLI entry point: hash a string (default) or a file's contents."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()

    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
4
import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class A_ : def __init__( self , _A , _A=1_4 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=True , _A=9_9 , _A=3_2 , _A=5 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=1_6 , _A=2 , _A=0.02 , _A=3 , _A=4 , _A=None , ): '''simple docstring''' UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = seq_length UpperCAmelCase = is_training UpperCAmelCase = use_token_type_ids UpperCAmelCase = use_input_mask UpperCAmelCase = use_labels UpperCAmelCase = use_mc_token_ids UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = max_position_embeddings UpperCAmelCase = type_vocab_size UpperCAmelCase = type_sequence_label_size UpperCAmelCase = initializer_range UpperCAmelCase = num_labels UpperCAmelCase = num_choices UpperCAmelCase = scope UpperCAmelCase = self.vocab_size - 1 def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase = None if self.use_input_mask: UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase = None if self.use_token_type_ids: UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , 
self.type_vocab_size ) UpperCAmelCase = None if self.use_mc_token_ids: UpperCAmelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length ) UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = None if self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase = self.get_config() UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def _lowercase ( self ): '''simple docstring''' return CTRLConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) def _lowercase ( self , _A , _A , _A , _A , _A , *_A ): '''simple docstring''' UpperCAmelCase = CTRLModel(config=_A ) model.to(_A ) model.eval() model(_A , token_type_ids=_A , head_mask=_A ) model(_A , token_type_ids=_A ) UpperCAmelCase = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) , config.n_layer ) def _lowercase ( self , _A , _A , _A , _A , _A , *_A ): '''simple docstring''' UpperCAmelCase = CTRLLMHeadModel(_A ) model.to(_A ) model.eval() UpperCAmelCase = model(_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , 
( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) = config_and_inputs UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask} return config, inputs_dict def _lowercase ( self , _A , _A , _A , _A , *_A ): '''simple docstring''' UpperCAmelCase = self.num_labels UpperCAmelCase = CTRLForSequenceClassification(_A ) model.to(_A ) model.eval() UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase = model(_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) @require_torch class A_ (a_ , a_ , a_ , unittest.TestCase ): UpperCAmelCase__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () UpperCAmelCase__ = (CTRLLMHeadModel,) if is_torch_available() else () UpperCAmelCase__ = ( { '''feature-extraction''': CTRLModel, '''text-classification''': CTRLForSequenceClassification, '''text-generation''': CTRLLMHeadModel, '''zero-shot''': CTRLForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase__ = True UpperCAmelCase__ = False UpperCAmelCase__ = False def _lowercase ( self , _A , _A , _A , _A , _A ): '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. 
return True return False def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = CTRLModelTester(self ) UpperCAmelCase = ConfigTester(self , config_class=_A , n_embd=3_7 ) def _lowercase ( self ): '''simple docstring''' super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def _lowercase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*_A ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*_A ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def _lowercase ( self ): '''simple docstring''' pass @slow def _lowercase ( self ): '''simple docstring''' for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase = CTRLModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :) def _lowercase ( self ): '''simple docstring''' pass @require_torch class A_ (unittest.TestCase ): def _lowercase ( self ): '''simple docstring''' super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @slow def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = CTRLLMHeadModel.from_pretrained('''ctrl''' ) model.to(_A ) UpperCAmelCase = torch.tensor( [[1_1_8_5_9, 0, 1_6_1_1, 8]] , dtype=torch.long , device=_A ) # Legal the president is UpperCAmelCase = [ 1_1_8_5_9, 0, 1_6_1_1, 8, 5, 1_5_0, 2_6_4_4_9, 2, 1_9, 3_4_8, 4_6_9, 3, 2_5_9_5, 4_8, 2_0_7_4_0, 2_4_6_5_3_3, 2_4_6_5_3_3, 1_9, 3_0, 5, ] # Legal the president is a good guy and I don't want to lose my job. 
\n \n I have a UpperCAmelCase = model.generate(_A , do_sample=_A ) self.assertListEqual(output_ids[0].tolist() , _A )
273
0
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Recursively place queens row by row, collecting full boards in *boards*.

    possible_board[i] is the column of the queen in row i.  A placement is
    rejected when the column is taken or when either diagonal (encoded as
    row - col and row + col) already holds a queen.
    """
    # The row we are filling is simply how many queens are placed so far.
    row = len(possible_board)

    # A queen in every row: render the board as strings and record it.
    if row == n:
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    for col in range(n):
        # Vertical collision, or a collision on either diagonal
        # (45°: row - col is constant; 135°: row + col is constant).
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # Safe square: recurse with the updated board and collision sets.
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    """Solve the n-queens problem and print every solution board."""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
5
import cva import numpy as np class A_ : def __init__( self , _A , _A ): '''simple docstring''' if k in (0.04, 0.06): UpperCAmelCase = k UpperCAmelCase = window_size else: raise ValueError('''invalid k value''' ) def __str__( self ): '''simple docstring''' return str(self.k ) def _lowercase ( self , _A ): '''simple docstring''' UpperCAmelCase = cva.imread(_A , 0 ) UpperCAmelCase , UpperCAmelCase = img.shape UpperCAmelCase = [] UpperCAmelCase = img.copy() UpperCAmelCase = cva.cvtColor(_A , cva.COLOR_GRAY2RGB ) UpperCAmelCase , UpperCAmelCase = np.gradient(_A ) UpperCAmelCase = dx**2 UpperCAmelCase = dy**2 UpperCAmelCase = dx * dy UpperCAmelCase = 0.04 UpperCAmelCase = self.window_size // 2 for y in range(_A , h - offset ): for x in range(_A , w - offset ): UpperCAmelCase = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() UpperCAmelCase = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() UpperCAmelCase = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() UpperCAmelCase = (wxx * wyy) - (wxy**2) UpperCAmelCase = wxx + wyy UpperCAmelCase = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 2_5_5 ) return color_img, corner_list if __name__ == "__main__": __A : Tuple = HarrisCorner(0.04, 3) __A , __A : List[Any] = edge_detect.detect("path_to_image") cva.imwrite("detect.png", color_img)
273
0
import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py A : Tuple = 'src/diffusers' # Matches is_xxx_available() A : Any = re.compile(R'is\_([a-z_]*)_available\(\)') # Matches from xxx import bla A : Optional[int] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') A : List[Any] = '\n{0} = None\n' A : Optional[Any] = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n' A : Union[str, Any] = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n' def __lowerCAmelCase ( a__ ) -> Dict: __a = _re_backend.findall(a__ ) if len(a__ ) == 0: return None return "_and_".join(a__ ) def __lowerCAmelCase ( ) -> Dict: with open(os.path.join(a__ , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __a = f.readlines() # Get to the point we do the actual imports for type checking __a = 0 __a = {} # Go through the end of the file while line_index < len(a__ ): # If the line contains is_backend_available, we grab all objects associated with the `else` block __a = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith('''else:''' ): line_index += 1 line_index += 1 __a = [] # Until we unindent, add backend objects to the list while line_index < len(a__ ) and len(lines[line_index] ) > 1: __a = lines[line_index] __a = _re_single_line_import.search(a__ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(a__ ) > 0: __a = objects else: line_index += 1 return backend_specific_objects def 
__lowerCAmelCase ( a__ , a__ ) -> Tuple: if name.isupper(): return DUMMY_CONSTANT.format(a__ ) elif name.islower(): return DUMMY_FUNCTION.format(a__ , a__ ) else: return DUMMY_CLASS.format(a__ , a__ ) def __lowerCAmelCase ( a__=None ) -> Dict: if backend_specific_objects is None: __a = read_init() # For special correspondence backend to module name as used in the function requires_modulename __a = {} for backend, objects in backend_specific_objects.items(): __a = '''[''' + ''', '''.join(F"""\"{b}\"""" for b in backend.split('''_and_''' ) ) + ''']''' __a = '''# This file is autogenerated by the command `make fix-copies`, do not edit.\n''' dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(a__ , a__ ) for o in objects] ) __a = dummy_file return dummy_files def __lowerCAmelCase ( a__=False ) -> Union[str, Any]: __a = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py __a = {'''torch''': '''pt'''} # Locate actual dummy modules and read their content. __a = os.path.join(a__ , '''utils''' ) __a = { backend: os.path.join(a__ , F"""dummy_{short_names.get(a__ , a__ )}_objects.py""" ) for backend in dummy_files.keys() } __a = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(a__ ): with open(a__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __a = f.read() else: __a = '''''' for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( F"""Updating diffusers.utils.dummy_{short_names.get(a__ , a__ )}_objects.py as the main """ '''__init__ has new objects.''' ) with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(dummy_files[backend] ) else: raise ValueError( '''The main __init__ has objects that are not present in ''' F"""diffusers.utils.dummy_{short_names.get(a__ , a__ )}_objects.py. 
Run `make fix-copies` """ '''to fix this.''' ) if __name__ == "__main__": A : List[str] = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') A : List[Any] = parser.parse_args() check_dummies(args.fix_and_overwrite)
6
from datetime import datetime import requests def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> bytes: '''simple docstring''' UpperCAmelCase = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url=''' UpperCAmelCase = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src'''] return requests.get(UpperCamelCase__ ).content if __name__ == "__main__": __A : Union[str, Any] = input("Enter Video/IGTV url: ").strip() __A : Tuple = F'{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4' with open(file_name, "wb") as fp: fp.write(download_video(url)) print(F'Done. Video saved to disk as {file_name}.')
273
0
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging lowercase_ = logging.get_logger(__name__) lowercase_ = { "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json", # See all Marian models at https://huggingface.co/models?filter=marian } class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = 'marian' lowerCamelCase = ['past_key_values'] lowerCamelCase = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : str,lowercase_ : Dict=5_8_1_0_1,lowercase_ : List[str]=None,lowercase_ : int=1_0_2_4,lowercase_ : Optional[Any]=1_2,lowercase_ : List[Any]=4_0_9_6,lowercase_ : Optional[Any]=1_6,lowercase_ : Dict=1_2,lowercase_ : Any=4_0_9_6,lowercase_ : Dict=1_6,lowercase_ : str=0.0,lowercase_ : int=0.0,lowercase_ : int=True,lowercase_ : List[str]=True,lowercase_ : str="gelu",lowercase_ : Optional[int]=1_0_2_4,lowercase_ : Dict=0.1,lowercase_ : List[Any]=0.0,lowercase_ : List[Any]=0.0,lowercase_ : int=0.02,lowercase_ : Any=5_8_1_0_0,lowercase_ : str=False,lowercase_ : Optional[Any]=5_8_1_0_0,lowercase_ : Optional[Any]=0,lowercase_ : Union[str, Any]=0,lowercase_ : int=True,**lowercase_ : Tuple,)-> List[Any]: '''simple docstring''' A__ = vocab_size A__ = decoder_vocab_size or vocab_size A__ = max_position_embeddings A__ = d_model A__ = encoder_ffn_dim A__ = encoder_layers A__ = encoder_attention_heads A__ = decoder_ffn_dim A__ = decoder_layers A__ = decoder_attention_heads A__ = dropout A__ = attention_dropout A__ = activation_dropout A__ = activation_function A__ = init_std A__ = encoder_layerdrop A__ = decoder_layerdrop A__ = use_cache A__ = encoder_layers A__ = 
scale_embedding # scale factor will be sqrt(d_model) if True A__ = share_encoder_decoder_embeddings super().__init__( pad_token_id=lowercase_,eos_token_id=lowercase_,is_encoder_decoder=lowercase_,decoder_start_token_id=lowercase_,forced_eos_token_id=lowercase_,**lowercase_,) class A ( _UpperCAmelCase ): """simple docstring""" @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def snake_case__ ( self : int )-> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: A__ = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: A__ = {0: 'batch'} A__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: A__ = {0: 'batch', 1: 'decoder_sequence'} A__ = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(lowercase_,direction='inputs' ) elif self.task == "causal-lm": # TODO: figure this case out. 
A__ = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: A__ , A__ = self.num_layers for i in range(lowercase_ ): A__ = {0: 'batch', 2: 'past_sequence + sequence'} A__ = {0: 'batch', 2: 'past_sequence + sequence'} else: A__ = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def snake_case__ ( self : str )-> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: A__ = super().outputs else: A__ = super(lowercase_,self ).outputs if self.use_past: A__ , A__ = self.num_layers for i in range(lowercase_ ): A__ = {0: 'batch', 2: 'past_sequence + sequence'} A__ = {0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def snake_case__ ( self : List[Any],lowercase_ : PreTrainedTokenizer,lowercase_ : int = -1,lowercase_ : int = -1,lowercase_ : bool = False,lowercase_ : Optional[TensorType] = None,)-> Mapping[str, Any]: '''simple docstring''' A__ = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase_,lowercase_,lowercase_,lowercase_,lowercase_ ) # Generate decoder inputs A__ = seq_length if not self.use_past else 1 A__ = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase_,lowercase_,lowercase_,lowercase_,lowercase_ ) A__ = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()} A__ = dict(**lowercase_,**lowercase_ ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' 
) else: import torch A__ , A__ = common_inputs['input_ids'].shape A__ = common_inputs['decoder_input_ids'].shape[1] A__ , A__ = self.num_attention_heads A__ = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) A__ = decoder_seq_length + 3 A__ = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) A__ = torch.cat( [common_inputs['decoder_attention_mask'], torch.ones(lowercase_,lowercase_ )],dim=1 ) A__ = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered A__ , A__ = self.num_layers A__ = min(lowercase_,lowercase_ ) A__ = max(lowercase_,lowercase_ ) - min_num_layers A__ = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(lowercase_ ): common_inputs["past_key_values"].append( ( torch.zeros(lowercase_ ), torch.zeros(lowercase_ ), torch.zeros(lowercase_ ), torch.zeros(lowercase_ ), ) ) # TODO: test this. A__ = encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(lowercase_,lowercase_ ): common_inputs["past_key_values"].append((torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) ) return common_inputs def snake_case__ ( self : Dict,lowercase_ : PreTrainedTokenizer,lowercase_ : int = -1,lowercase_ : int = -1,lowercase_ : bool = False,lowercase_ : Optional[TensorType] = None,)-> Mapping[str, Any]: '''simple docstring''' A__ = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase_,lowercase_,lowercase_,lowercase_,lowercase_ ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' 
) else: import torch A__ , A__ = common_inputs['input_ids'].shape # Not using the same length for past_key_values A__ = seqlen + 2 A__ , A__ = self.num_layers A__ , A__ = self.num_attention_heads A__ = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) A__ = common_inputs['attention_mask'].dtype A__ = torch.cat( [common_inputs['attention_mask'], torch.ones(lowercase_,lowercase_,dtype=lowercase_ )],dim=1 ) A__ = [ (torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) for _ in range(lowercase_ ) ] return common_inputs def snake_case__ ( self : Tuple,lowercase_ : PreTrainedTokenizer,lowercase_ : int = -1,lowercase_ : int = -1,lowercase_ : bool = False,lowercase_ : Optional[TensorType] = None,)-> Mapping[str, Any]: '''simple docstring''' A__ = compute_effective_axis_dimension( lowercase_,fixed_dimension=OnnxConfig.default_fixed_batch,num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX A__ = tokenizer.num_special_tokens_to_add(lowercase_ ) A__ = compute_effective_axis_dimension( lowercase_,fixed_dimension=OnnxConfig.default_fixed_sequence,num_token_to_add=lowercase_ ) # Generate dummy inputs according to compute batch and sequence A__ = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size A__ = dict(tokenizer(lowercase_,return_tensors=lowercase_ ) ) return common_inputs def snake_case__ ( self : Optional[int],lowercase_ : PreTrainedTokenizer,lowercase_ : int = -1,lowercase_ : int = -1,lowercase_ : bool = False,lowercase_ : Optional[TensorType] = None,)-> Mapping[str, Any]: '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: A__ = self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowercase_,batch_size=lowercase_,seq_length=lowercase_,is_pair=lowercase_,framework=lowercase_ ) else: A__ = self._generate_dummy_inputs_for_causal_lm( 
lowercase_,batch_size=lowercase_,seq_length=lowercase_,is_pair=lowercase_,framework=lowercase_ ) return common_inputs def snake_case__ ( self : str,lowercase_ : Any,lowercase_ : Union[str, Any],lowercase_ : List[str],lowercase_ : Optional[Any] )-> Any: '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: A__ = super()._flatten_past_key_values_(lowercase_,lowercase_,lowercase_,lowercase_ ) else: A__ = super(lowercase_,self )._flatten_past_key_values_( lowercase_,lowercase_,lowercase_,lowercase_ ) @property def snake_case__ ( self : str )-> float: '''simple docstring''' return 1E-4
7
from __future__ import annotations from collections.abc import Callable def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 100 , ) -> float: '''simple docstring''' UpperCAmelCase = x_start UpperCAmelCase = fnc(UpperCamelCase__ ) UpperCAmelCase = 0.0 for _ in range(UpperCamelCase__ ): # Approximates small segments of curve as linear and solve # for trapezoidal area UpperCAmelCase = (x_end - x_start) / steps + xa UpperCAmelCase = fnc(UpperCamelCase__ ) area += abs(fxa + fxa ) * (xa - xa) / 2 # Increment step UpperCAmelCase = xa UpperCAmelCase = fxa return area if __name__ == "__main__": def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> str: '''simple docstring''' return x**3 + x**2 print("f(x) = x^3 + x^2") print("The area between the curve, x = -5, x = 5 and the x axis is:") __A : List[Any] = 10 while i <= 100_000: print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}') i *= 10
273
0
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''', # See all BioGPT models at https://huggingface.co/models?filter=biogpt } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = "biogpt" def __init__( self : Any , _UpperCamelCase : Dict=4_2_3_8_4 , _UpperCamelCase : Dict=1_0_2_4 , _UpperCamelCase : Optional[Any]=2_4 , _UpperCamelCase : Union[str, Any]=1_6 , _UpperCamelCase : Union[str, Any]=4_0_9_6 , _UpperCamelCase : Union[str, Any]="gelu" , _UpperCamelCase : Tuple=0.1 , _UpperCamelCase : int=0.1 , _UpperCamelCase : Union[str, Any]=1_0_2_4 , _UpperCamelCase : Optional[int]=0.02 , _UpperCamelCase : str=1e-12 , _UpperCamelCase : List[str]=True , _UpperCamelCase : str=True , _UpperCamelCase : List[str]=0.0 , _UpperCamelCase : Optional[Any]=0.0 , _UpperCamelCase : int=1 , _UpperCamelCase : List[Any]=0 , _UpperCamelCase : Any=2 , **_UpperCamelCase : List[str] , ) ->List[Any]: snake_case_ = vocab_size snake_case_ = max_position_embeddings snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = scale_embedding snake_case_ = use_cache snake_case_ = layerdrop snake_case_ = activation_dropout super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
8
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer __A : Dict = logging.get_logger(__name__) __A : Any = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __A : Tuple = { "vocab_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt" ), "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt", "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt" ), }, "tokenizer_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli": ( "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json" ), }, } __A : List[Any] = { "squeezebert/squeezebert-uncased": 512, "squeezebert/squeezebert-mnli": 512, "squeezebert/squeezebert-mnli-headless": 512, } __A : List[Any] = { "squeezebert/squeezebert-uncased": {"do_lower_case": True}, "squeezebert/squeezebert-mnli": {"do_lower_case": True}, "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True}, } class A_ (a_ ): UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = PRETRAINED_INIT_CONFIGURATION UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = SqueezeBertTokenizer def __init__( self , _A=None , _A=None , _A=True , _A="[UNK]" , _A="[SEP]" , _A="[PAD]" , _A="[CLS]" , _A="[MASK]" , _A=True , _A=None , **_A , ): '''simple docstring''' super().__init__( _A , tokenizer_file=_A , do_lower_case=_A , 
unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , ) UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , _A ) != do_lower_case or normalizer_state.get('''strip_accents''' , _A ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , _A ) != tokenize_chinese_chars ): UpperCAmelCase = getattr(_A , normalizer_state.pop('''type''' ) ) UpperCAmelCase = do_lower_case UpperCAmelCase = strip_accents UpperCAmelCase = tokenize_chinese_chars UpperCAmelCase = normalizer_class(**_A ) UpperCAmelCase = do_lower_case def _lowercase ( self , _A , _A=None ): '''simple docstring''' UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _lowercase ( self , _A , _A = None ): '''simple docstring''' UpperCAmelCase = [self.sep_token_id] UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowercase ( self , _A , _A = None ): '''simple docstring''' UpperCAmelCase = self._tokenizer.model.save(_A , name=_A ) return tuple(_A )
273
0
from bisect import bisect from itertools import accumulate def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ): __SCREAMING_SNAKE_CASE : Tuple = sorted(zip(lowercase__ , lowercase__ ) , key=lambda lowercase__ : x[0] / x[1] , reverse=lowercase__ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = [i[0] for i in r], [i[1] for i in r] __SCREAMING_SNAKE_CASE : Dict = list(accumulate(lowercase__ ) ) __SCREAMING_SNAKE_CASE : Optional[Any] = bisect(lowercase__ , lowercase__ ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
9
# Conversion script: T5X/Flax SwitchTransformers checkpoint -> PyTorch transformers model.
#
# NOTE(review): this chunk is heavily name-mangled. Every local was collapsed to
# `UpperCAmelCase` (so only the last assignment survives), several referenced names
# (`s_dict`, `keys`, `new_key`, `MOE_LAYER_NAME_MAPPING`, `GIN_TO_CONFIG_MAPPING`,
# `expert_weihts`, `regex_match`, `activation`, `num_experts`, `config`, ...) are
# undefined, and the 2nd and 3rd defs repeat the parameter name `UpperCamelCase__`
# (a SyntaxError). Code is preserved byte-for-byte; only documentation is added.
import argparse
import re

from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging

logging.set_verbosity_info()

# should not include what is already done by the `from_pt` argument
# Mapping of T5X parameter-name fragments to transformers names.
# NOTE(review): presumably this was named MOE_LAYER_NAME_MAPPING before mangling.
__A : int = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm",  # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}


def __SCREAMING_SNAKE_CASE(UpperCamelCase__) -> List[str]:
    """Rename every key of a flat T5X state dict to the transformers layout.

    NOTE(review): body references `s_dict`, `keys`, `new_key` that the name
    mangling left undefined — TODO restore before running.
    """
    UpperCAmelCase = list(s_dict.keys())
    for key in keys:
        # 1. Renumber transformer layers: layers_N -> block/N/layer.
        UpperCAmelCase = R'''.*/layers_(\d+)'''
        UpperCAmelCase = key
        if re.match(UpperCamelCase__, UpperCamelCase__):
            UpperCAmelCase = re.sub(R'''layers_(\d+)''', R'''block/\1/layer''', UpperCamelCase__)
        UpperCAmelCase = R'''(encoder|decoder)\/'''
        if re.match(UpperCamelCase__, UpperCamelCase__):
            UpperCAmelCase = re.match(UpperCamelCase__, UpperCamelCase__).groups()
            # Encoder MLP sub-layers live at index 1, decoder MLP at index 2.
            if groups[0] == "encoder":
                UpperCAmelCase = re.sub(R'''/mlp/''', R'''/1/mlp/''', UpperCamelCase__)
                UpperCAmelCase = re.sub(R'''/pre_mlp_layer_norm/''', R'''/1/layer_norm/''', UpperCamelCase__)
            elif groups[0] == "decoder":
                UpperCAmelCase = re.sub(R'''/mlp/''', R'''/2/mlp/''', UpperCamelCase__)
                UpperCAmelCase = re.sub(R'''/pre_mlp_layer_norm/''', R'''/2/layer_norm/''', UpperCamelCase__)
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                UpperCAmelCase = new_key.replace(UpperCamelCase__, UpperCamelCase__)
        print(F"""{key} -> {new_key}""")
        UpperCAmelCase = s_dict.pop(UpperCamelCase__)
    # Relative attention bias tables are stored transposed in T5X.
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        UpperCAmelCase = s_dict[
            '''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        UpperCAmelCase = s_dict[
            '''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
        ].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            UpperCAmelCase = s_dict[key].shape[0]
            UpperCAmelCase = s_dict[key]
            for idx in range(UpperCamelCase__):
                # NOTE(review): `expert_weihts` (sic) is undefined after mangling,
                # and the replacement string below was clobbered by the tooling.
                UpperCAmelCase = expert_weihts[idx]
                print(F"""{key} -> {key.replace("expert/" , "nested fstring" )}""")
            s_dict.pop(UpperCamelCase__)
    return s_dict


# Gin hyper-parameter names -> SwitchTransformersConfig attribute names.
# NOTE(review): presumably named GIN_TO_CONFIG_MAPPING before mangling.
__A : Optional[int] = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}


def __SCREAMING_SNAKE_CASE(UpperCamelCase__, UpperCamelCase__) -> List[str]:
    """Parse a gin config file into a SwitchTransformersConfig.

    NOTE(review): duplicate parameter name is a SyntaxError (originally
    `gin_file, num_experts`); body also references undefined `regex_match`,
    `param`, `activation`, `num_experts`, `config` — TODO restore.
    """
    import regex as re

    with open(UpperCamelCase__, '''r''') as f:
        UpperCAmelCase = f.read()
    # Extract simple `NAME = value` assignments from the gin file.
    UpperCAmelCase = re.findall(R'''(.*) = ([0-9.]*)''', UpperCamelCase__)
    UpperCAmelCase = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            UpperCAmelCase = float(UpperCamelCase__) if '''.''' in value else int(UpperCamelCase__)
    # Activation is written as a one-element tuple, e.g. ('gelu',).
    UpperCAmelCase = re.findall(R'''(.*activations) = \(\'(.*)\',\)''', UpperCamelCase__)[0]
    UpperCAmelCase = str(activation[1])
    UpperCAmelCase = num_experts
    UpperCAmelCase = SwitchTransformersConfig(**UpperCamelCase__)
    return config


def __SCREAMING_SNAKE_CASE(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__=None, UpperCamelCase__="./", UpperCamelCase__=8) -> List[Any]:
    """Load a T5X checkpoint, rename its weights, and save a PyTorch model.

    NOTE(review): duplicate parameter names are a SyntaxError (originally
    `flax_checkpoint_path, config_file, gin_file, pytorch_dump_path,
    num_experts`); body references undefined `gin_file`, `flax_params`,
    `pt_model`, `flax_params_renamed` style names — TODO restore.
    """
    # Initialise PyTorch model
    print(F"""Loading flax weights from : {flax_checkpoint_path}""")
    UpperCAmelCase = checkpoints.load_tax_checkpoint(UpperCamelCase__)
    if gin_file is not None:
        UpperCAmelCase = convert_gin_to_config(UpperCamelCase__, UpperCamelCase__)
    else:
        UpperCAmelCase = SwitchTransformersConfig.from_pretrained(UpperCamelCase__)
    UpperCAmelCase = SwitchTransformersForConditionalGeneration(UpperCamelCase__)
    UpperCAmelCase = flax_params['''target''']
    UpperCAmelCase = flatten_dict(UpperCamelCase__, sep='''/''')
    UpperCAmelCase = rename_keys(UpperCamelCase__)
    UpperCAmelCase = unflatten_dict(UpperCamelCase__, sep='''/''')
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(UpperCamelCase__, UpperCamelCase__)
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    pt_model.save_pretrained(UpperCamelCase__)


if __name__ == "__main__":
    __A : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    __A : Tuple = parser.parse_args()
    # NOTE(review): `convert_flax_checkpoint_to_pytorch` and `parser`/`args` names
    # were erased by the mangling (everything became `__A`) — TODO restore.
    convert_flax_checkpoint_to_pytorch(
        args.switch_tax_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
273
0
# Unit tests for the BARTpho tokenizer (sentencepiece-BPE with a monolingual
# vocab file on top).
#
# NOTE(review): name mangling collapsed identifiers. The base class
# `__SCREAMING_SNAKE_CASE` is undefined (presumably TokenizerTesterMixin),
# all four methods share the name `SCREAMING_SNAKE_CASE_` (each shadows the
# previous, so unittest would discover none of them), locals collapsed to
# `lowerCamelCase__`, and return annotations reference un-imported typing
# names (`Tuple`, ...), which raises NameError when the class body executes.
# Code preserved byte-for-byte; documentation only.
import os
import unittest

from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir

from ...test_tokenization_common import TokenizerTesterMixin

# Path to the shared sentencepiece fixture model.
__A = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class _SCREAMING_SNAKE_CASE(__SCREAMING_SNAKE_CASE, unittest.TestCase):
    """Tokenizer test-suite wiring for BartphoTokenizer."""

    lowercase_ = BartphoTokenizer
    lowercase_ = False
    lowercase_ = True

    def SCREAMING_SNAKE_CASE_(self: List[Any]) -> Tuple:
        """setUp: write a tiny monolingual vocab file and a saved tokenizer into tmpdir."""
        super().setUp()
        lowerCamelCase__: int = ["▁This", "▁is", "▁a", "▁t", "est"]
        lowerCamelCase__: Tuple = dict(zip(UpperCAmelCase_, range(len(UpperCAmelCase_))))
        lowerCamelCase__: List[Any] = {"unk_token": "<unk>"}
        lowerCamelCase__: Dict = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            # One "token id" pair per line, the format BartphoTokenizer expects.
            for token in vocab_tokens:
                fp.write(F"""{token} {vocab_tokens[token]}\n""")
        lowerCamelCase__: Dict = BartphoTokenizer(UpperCAmelCase_, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def SCREAMING_SNAKE_CASE_(self: Optional[int], **UpperCAmelCase_: Optional[Any]) -> str:
        """Factory: reload the tokenizer saved in setUp with the special-token map."""
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **UpperCAmelCase_)

    def SCREAMING_SNAKE_CASE_(self: Union[str, Any], UpperCAmelCase_: Optional[Any]) -> List[Any]:
        """Return a (raw input, expected decoded output) pair; 'là' maps to <unk> pieces."""
        lowerCamelCase__: Optional[int] = "This is a là test"
        lowerCamelCase__: Optional[Any] = "This is a<unk><unk> test"
        return input_text, output_text

    def SCREAMING_SNAKE_CASE_(self: List[Any]) -> Optional[Any]:
        """Check tokenize() piece output and convert_tokens_to_ids() against the fixture vocab."""
        lowerCamelCase__: str = BartphoTokenizer(UpperCAmelCase_, self.monolingual_vocab_file, **self.special_tokens_map)
        lowerCamelCase__: List[Any] = "This is a là test"
        lowerCamelCase__: Optional[int] = "▁This ▁is ▁a ▁l à ▁t est".split()
        lowerCamelCase__: Optional[int] = tokenizer.tokenize(UpperCAmelCase_)
        self.assertListEqual(UpperCAmelCase_, UpperCAmelCase_)
        lowerCamelCase__: Tuple = tokens + [tokenizer.unk_token]
        # Ids 3 are the <unk> fallback for pieces missing from the tiny vocab.
        lowerCamelCase__: List[Any] = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_), UpperCAmelCase_)
10
# Shared test mixin for DeepFloyd-IF pipelines: builds tiny dummy components and
# checks save/load round-trips (with and without optional components).
#
# NOTE(review): name mangling collapsed identifiers. All four methods share the
# name `_lowercase` (each shadows the previous), every local is `UpperCAmelCase`
# (only the last binding survives), and `_A` is referenced but never defined.
# Code preserved byte-for-byte; documentation only.
import tempfile

import numpy as np
import torch

from transformers import AutoTokenizer, TaEncoderModel

from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device

from ..test_pipelines_common import to_np


class A_:
    """Dummy-component factory + save/load tests for IF pipelines."""

    def _lowercase(self):
        """Build tiny stage-1 components (text encoder, tokenizer, UNet, scheduler, watermarker)."""
        torch.manual_seed(0)
        UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''')
        torch.manual_seed(0)
        UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''')
        torch.manual_seed(0)
        UpperCAmelCase = UNetaDConditionModel(
            sample_size=3_2,
            layers_per_block=1,
            block_out_channels=[3_2, 6_4],
            down_block_types=[
                '''ResnetDownsampleBlock2D''',
                '''SimpleCrossAttnDownBlock2D''',
            ],
            mid_block_type='''UNetMidBlock2DSimpleCrossAttn''',
            up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=3_2,
            encoder_hid_dim=3_2,
            attention_head_dim=8,
            addition_embed_type='''text''',
            addition_embed_type_num_heads=2,
            cross_attention_norm='''group_norm''',
            resnet_time_scale_shift='''scale_shift''',
            act_fn='''gelu''',
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=1_0_0_0,
            beta_schedule='''squaredcos_cap_v2''',
            beta_start=0.00_01,
            beta_end=0.02,
            thresholding=_A,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type='''epsilon''',
            variance_type='''learned_range''',
        )
        torch.manual_seed(0)
        UpperCAmelCase = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _lowercase(self):
        """Build tiny super-resolution components (6-channel UNet + image noising scheduler)."""
        torch.manual_seed(0)
        UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''')
        torch.manual_seed(0)
        UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''')
        torch.manual_seed(0)
        UpperCAmelCase = UNetaDConditionModel(
            sample_size=3_2,
            layers_per_block=[1, 2],
            block_out_channels=[3_2, 6_4],
            down_block_types=[
                '''ResnetDownsampleBlock2D''',
                '''SimpleCrossAttnDownBlock2D''',
            ],
            mid_block_type='''UNetMidBlock2DSimpleCrossAttn''',
            up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=3_2,
            encoder_hid_dim=3_2,
            attention_head_dim=8,
            addition_embed_type='''text''',
            addition_embed_type_num_heads=2,
            cross_attention_norm='''group_norm''',
            resnet_time_scale_shift='''scale_shift''',
            act_fn='''gelu''',
            class_embed_type='''timestep''',
            mid_block_scale_factor=1.4_14,
            time_embedding_act_fn='''gelu''',
            time_embedding_dim=3_2,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=1_0_0_0,
            beta_schedule='''squaredcos_cap_v2''',
            beta_start=0.00_01,
            beta_end=0.02,
            thresholding=_A,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type='''epsilon''',
            variance_type='''learned_range''',
        )
        torch.manual_seed(0)
        UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=1_0_0_0,
            beta_schedule='''squaredcos_cap_v2''',
            beta_start=0.00_01,
            beta_end=0.02,
        )
        torch.manual_seed(0)
        UpperCAmelCase = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _lowercase(self):
        """Save/load round-trip with all optional components set to None; output must match within 1e-4."""
        UpperCAmelCase = self.get_dummy_components()
        UpperCAmelCase = self.pipeline_class(**_A)
        pipe.to(_A)
        pipe.set_progress_bar_config(disable=_A)
        UpperCAmelCase = self.get_dummy_inputs(_A)
        UpperCAmelCase = inputs['''prompt''']
        UpperCAmelCase = inputs['''generator''']
        UpperCAmelCase = inputs['''num_inference_steps''']
        UpperCAmelCase = inputs['''output_type''']
        if "image" in inputs:
            UpperCAmelCase = inputs['''image''']
        else:
            UpperCAmelCase = None
        if "mask_image" in inputs:
            UpperCAmelCase = inputs['''mask_image''']
        else:
            UpperCAmelCase = None
        if "original_image" in inputs:
            UpperCAmelCase = inputs['''original_image''']
        else:
            UpperCAmelCase = None
        UpperCAmelCase, UpperCAmelCase = pipe.encode_prompt(_A)
        # inputs with prompt converted to embeddings
        UpperCAmelCase = {
            '''prompt_embeds''': prompt_embeds,
            '''negative_prompt_embeds''': negative_prompt_embeds,
            '''generator''': generator,
            '''num_inference_steps''': num_inference_steps,
            '''output_type''': output_type,
        }
        if image is not None:
            UpperCAmelCase = image
        if mask_image is not None:
            UpperCAmelCase = mask_image
        if original_image is not None:
            UpperCAmelCase = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(_A, _A, _A)
        UpperCAmelCase = pipe(**_A)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(_A)
            UpperCAmelCase = self.pipeline_class.from_pretrained(_A)
        pipe_loaded.to(_A)
        pipe_loaded.set_progress_bar_config(disable=_A)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        # Optional components must still be None after the round-trip.
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(_A, _A) is None,
                F"""`{optional_component}` did not stay set to None after loading.""",
            )
        UpperCAmelCase = self.get_dummy_inputs(_A)
        UpperCAmelCase = inputs['''generator''']
        UpperCAmelCase = inputs['''num_inference_steps''']
        UpperCAmelCase = inputs['''output_type''']
        # inputs with prompt converted to embeddings
        UpperCAmelCase = {
            '''prompt_embeds''': prompt_embeds,
            '''negative_prompt_embeds''': negative_prompt_embeds,
            '''generator''': generator,
            '''num_inference_steps''': num_inference_steps,
            '''output_type''': output_type,
        }
        if image is not None:
            UpperCAmelCase = image
        if mask_image is not None:
            UpperCAmelCase = mask_image
        if original_image is not None:
            UpperCAmelCase = original_image
        UpperCAmelCase = pipe_loaded(**_A)[0]
        # Compare outputs of the original and the reloaded pipeline.
        UpperCAmelCase = np.abs(to_np(_A) - to_np(_A)).max()
        self.assertLess(_A, 1E-4)

    def _lowercase(self):
        """Plain save/load round-trip; reloaded pipeline output must match within 1e-4."""
        UpperCAmelCase = self.get_dummy_components()
        UpperCAmelCase = self.pipeline_class(**_A)
        pipe.to(_A)
        pipe.set_progress_bar_config(disable=_A)
        UpperCAmelCase = self.get_dummy_inputs(_A)
        UpperCAmelCase = pipe(**_A)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(_A)
            UpperCAmelCase = self.pipeline_class.from_pretrained(_A)
        pipe_loaded.to(_A)
        pipe_loaded.set_progress_bar_config(disable=_A)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        UpperCAmelCase = self.get_dummy_inputs(_A)
        UpperCAmelCase = pipe_loaded(**_A)[0]
        UpperCAmelCase = np.abs(to_np(_A) - to_np(_A)).max()
        self.assertLess(_A, 1E-4)
273
0
# Image-to-text pipeline: runs a vision-to-sequence model (GIT, pix2struct,
# vision-encoder-decoder) to caption an image, optionally conditioned on a prompt.
#
# NOTE(review): name mangling broke this chunk. `a` in the decorator/base is
# undefined (presumably PIPELINE_INIT_ARGS / Pipeline); `__init__` and
# `_lowerCamelCase` declare duplicate parameter names (`*__lowerCamelCase,
# **__lowerCamelCase` and three `__lowerCamelCase=None`), which is a
# SyntaxError; all four non-init methods share one name and shadow each other;
# the class name collides with the `lowerCAmelCase__` logger above it.
# Code preserved byte-for-byte; documentation only.
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

lowerCAmelCase__ = logging.get_logger(__name__)


@add_end_docstrings(a)
class lowerCAmelCase__(a):
    """Pipeline turning an image (plus optional text prompt) into generated text."""

    def __init__(self, *__lowerCamelCase, **__lowerCamelCase) -> List[str]:
        # Requires the vision backend; restrict to vision-2-seq model classes.
        super().__init__(*__lowerCamelCase, **__lowerCamelCase)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING)

    def _lowerCamelCase(self, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None) -> Dict:
        """Split call kwargs into (preprocess_params, forward_kwargs, postprocess_params).

        Rejects `max_new_tokens` given both directly and inside `generate_kwargs`.
        """
        _A: Optional[int] = {}
        _A: Tuple = {}
        if prompt is not None:
            _A: Dict = prompt
        if generate_kwargs is not None:
            _A: Union[str, Any] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                _A: int = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one")
            _A: Dict = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, __lowerCamelCase, **__lowerCamelCase) -> Optional[int]:
        # Accepts a single image / URL / list thereof; delegates to Pipeline.__call__.
        return super().__call__(__lowerCamelCase, **__lowerCamelCase)

    def _lowerCamelCase(self, __lowerCamelCase, __lowerCamelCase=None) -> Optional[Any]:
        """preprocess: load the image and build model inputs per model family."""
        _A: List[Any] = load_image(__lowerCamelCase)
        if prompt is not None:
            if not isinstance(__lowerCamelCase, __lowerCamelCase):
                raise ValueError(
                    F"Received an invalid text input, got - {type(__lowerCamelCase)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation.")
            _A: Optional[Any] = self.model.config.model_type
            if model_type == "git":
                # GIT prepends the CLS token to the prompt ids.
                _A: List[Any] = self.image_processor(images=__lowerCamelCase, return_tensors=self.framework)
                _A: Optional[int] = self.tokenizer(text=__lowerCamelCase, add_special_tokens=__lowerCamelCase).input_ids
                _A: Any = [self.tokenizer.cls_token_id] + input_ids
                _A: Any = torch.tensor(__lowerCamelCase).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                # pix2struct renders the prompt into the image as header text.
                _A: List[str] = self.image_processor(images=__lowerCamelCase, header_text=__lowerCamelCase, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                _A: Optional[int] = self.image_processor(images=__lowerCamelCase, return_tensors=self.framework)
                _A: Dict = self.tokenizer(__lowerCamelCase, return_tensors=self.framework)
                model_inputs.update(__lowerCamelCase)
            else:
                raise ValueError(F"Model type {model_type} does not support conditional text generation")
        else:
            _A: Any = self.image_processor(images=__lowerCamelCase, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            _A: List[Any] = None
        return model_inputs

    def _lowerCamelCase(self, __lowerCamelCase, __lowerCamelCase=None) -> Dict:
        """_forward: run `generate` on the prepared inputs."""
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], __lowerCamelCase)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            _A: Tuple = None
        if generate_kwargs is None:
            _A: List[Any] = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        _A: List[str] = model_inputs.pop(self.model.main_input_name)
        _A: int = self.model.generate(__lowerCamelCase, **__lowerCamelCase, **__lowerCamelCase)
        return model_outputs

    def _lowerCamelCase(self, __lowerCamelCase) -> Union[str, Any]:
        """postprocess: decode each generated id sequence to a `generated_text` record."""
        _A: List[str] = []
        for output_ids in model_outputs:
            _A: Any = {
                "generated_text": self.tokenizer.decode(
                    __lowerCamelCase,
                    skip_special_tokens=__lowerCamelCase,
                )
            }
            records.append(__lowerCamelCase)
        return records
11
from __future__ import annotations from collections import namedtuple def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> tuple: '''simple docstring''' UpperCAmelCase = namedtuple('''result''' , '''name value''' ) if (voltage, current, power).count(0 ) != 1: raise ValueError('''Only one argument must be 0''' ) elif power < 0: raise ValueError( '''Power cannot be negative in any electrical/electronics system''' ) elif voltage == 0: return result('''voltage''' , power / current ) elif current == 0: return result('''current''' , power / voltage ) elif power == 0: return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
273
0
# DeBERTa-v2 model configuration plus its ONNX export configuration.
#
# NOTE(review): name mangling broke this chunk. Both `__init__` and
# `generate_dummy_inputs` declare every parameter as `UpperCamelCase_`
# (duplicate parameter names — a SyntaxError); every attribute assignment
# collapsed to the local `__lowerCamelCase`, so the config attributes are
# never actually set; both classes share the name `lowerCamelCase__` and the
# base `__lowerCamelCase` is undefined (presumably PretrainedConfig /
# OnnxConfig). Code preserved byte-for-byte; documentation only.
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType

UpperCAmelCase_ = logging.get_logger(__name__)

# Hub URLs of the published DeBERTa-v2 configuration files.
UpperCAmelCase_ = {
    'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
    ),
    'microsoft/deberta-v2-xxlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
    ),
}


class lowerCamelCase__(__lowerCamelCase):
    """Configuration holder for DeBERTa-v2 models (`model_type = "deberta-v2"`)."""

    UpperCAmelCase__: Dict = 'deberta-v2'

    def __init__(self: Optional[Any], UpperCamelCase_: Union[str, Any]=12_81_00, UpperCamelCase_: Optional[int]=15_36, UpperCamelCase_: str=24, UpperCamelCase_: Optional[Any]=24, UpperCamelCase_: int=61_44, UpperCamelCase_: Dict="gelu", UpperCamelCase_: Optional[Any]=0.1, UpperCamelCase_: List[Any]=0.1, UpperCamelCase_: List[Any]=5_12, UpperCamelCase_: List[Any]=0, UpperCamelCase_: Any=0.02, UpperCamelCase_: Tuple=1E-7, UpperCamelCase_: List[Any]=False, UpperCamelCase_: Any=-1, UpperCamelCase_: Tuple=0, UpperCamelCase_: str=True, UpperCamelCase_: Any=None, UpperCamelCase_: List[Any]=0, UpperCamelCase_: str="gelu", **UpperCamelCase_: Tuple, ):
        super().__init__(**UpperCamelCase_)
        __lowerCamelCase = hidden_size
        __lowerCamelCase = num_hidden_layers
        __lowerCamelCase = num_attention_heads
        __lowerCamelCase = intermediate_size
        __lowerCamelCase = hidden_act
        __lowerCamelCase = hidden_dropout_prob
        __lowerCamelCase = attention_probs_dropout_prob
        __lowerCamelCase = max_position_embeddings
        __lowerCamelCase = type_vocab_size
        __lowerCamelCase = initializer_range
        __lowerCamelCase = relative_attention
        __lowerCamelCase = max_relative_positions
        __lowerCamelCase = pad_token_id
        __lowerCamelCase = position_biased_input
        # Backwards compatibility
        # A pipe-separated string like "p2c|c2p" is split into a list.
        if type(UpperCamelCase_) == str:
            __lowerCamelCase = [x.strip() for x in pos_att_type.lower().split("""|""")]
        __lowerCamelCase = pos_att_type
        __lowerCamelCase = vocab_size
        __lowerCamelCase = layer_norm_eps
        __lowerCamelCase = kwargs.get("""pooler_hidden_size""", UpperCamelCase_)
        __lowerCamelCase = pooler_dropout
        __lowerCamelCase = pooler_hidden_act


class lowerCamelCase__(__lowerCamelCase):
    """ONNX export configuration for DeBERTa-v2."""

    @property
    def lowerCAmelCase__(self: int):
        """Dynamic input axes; token_type_ids only when the config uses them."""
        if self.task == "multiple-choice":
            __lowerCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            __lowerCamelCase = {0: """batch""", 1: """sequence"""}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)])
        else:
            return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)])

    @property
    def lowerCAmelCase__(self: Any):
        # Minimum ONNX opset supporting the required operators.
        return 12

    def lowerCAmelCase__(self: Optional[Any], UpperCamelCase_: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], UpperCamelCase_: int = -1, UpperCamelCase_: int = -1, UpperCamelCase_: int = -1, UpperCamelCase_: bool = False, UpperCamelCase_: Optional["TensorType"] = None, UpperCamelCase_: int = 3, UpperCamelCase_: int = 40, UpperCamelCase_: int = 40, UpperCamelCase_: "PreTrainedTokenizerBase" = None, ):
        """Build dummy export inputs, dropping token_type_ids when unused."""
        __lowerCamelCase = super().generate_dummy_inputs(preprocessor=UpperCamelCase_, framework=UpperCamelCase_)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
12
# Lazy-import package __init__ for the RoFormer model family.
# Declares the import structure (config, tokenizers, PyTorch / TF / Flax
# models) and defers actual imports to first attribute access via _LazyModule.
# Optional backends that are unavailable are silently skipped.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Always-available submodules.
__A : Dict = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

# Fast tokenizer only if the `tokenizers` backend is present.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : Union[str, Any] = ["RoFormerTokenizerFast"]

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : str = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : int = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

# Flax models.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : Any = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]

# NOTE(review): the import-structure dict and the extension lists above were
# all renamed to `__A` by the mangling, so `_import_structure` below and the
# `.append`/`.extend` plumbing that normally links them are missing here.
if TYPE_CHECKING:
    # Static imports for type checkers only; mirrors the structure above.
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy.
    __A : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
273
0
import functools


def A_(days, costs):
    """Minimum cost to cover all travel days with 1-, 7-, and 30-day passes.

    Args:
        days: list of distinct travel days, each an int in [1, 365].
        costs: list of exactly three ints — prices of the 1-day, 7-day and
            30-day passes, in that order.

    Returns:
        The minimum total ticket cost covering every day in `days`
        (0 for an empty list).

    Raises:
        ValueError: on malformed `days`/`costs` or out-of-range days.

    NOTE(review): the obfuscated original declared both parameters with the
    same name (a SyntaxError) and mangled the inner function's parameter;
    the canonical names are restored here.
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        # Minimum cost to cover all travel days from `index` to year end.
        if index > 365:
            return 0
        if index not in days_set:
            # Not a travel day: skip ahead for free.
            return dynamic_programming(index + 1)
        # Travel day: buy the cheapest of the three passes starting today.
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
13
import argparse

import torch

from transformers import YosoConfig, YosoForMaskedLM


def rename_key(orig_key):
    """Map one key of the original YOSO checkpoint onto its transformers name."""
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        # layers are stored flat as "transformer_<n>.…" in the original dump
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """Rename every key in place, dropping pooler/classifier heads and
    rebuilding the tied prediction bias and the position-id buffer."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    # position ids start at 2 (padding idx offset) — matches the visible "+ 2"
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    """Load the original checkpoint, convert its state dict, and save a
    transformers-format YosoForMaskedLM at `pytorch_dump_path`."""
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
273
0
# Lint as: python3
"""Version utilities: a comparable, hashable MAJOR.MINOR.PATCH dataclass."""

import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union


# Matches exactly "x.y.z" with x, y, z all digits.
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    """Dataset version ``MAJOR.MINOR.PATCH``.

    Comparable with other ``Version`` instances and with plain ``"x.y.z"``
    strings (strings are parsed on the fly by ``_validate_operand``).
    """

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        # Parse the canonical string once so major/minor/patch stay in sync.
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        """The ``(major, minor, patch)`` triple used for all comparisons."""
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        # Accept either a raw "x.y.z" string or another Version.
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            # Un-parseable operands simply compare unequal.
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        """Build a Version from a dict, silently ignoring unknown keys."""
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        # Serialize back to the canonical string form.
        return self.version_str


def _str_to_version_tuple(version_str):
    """Parse "x.y.z" into an ``(int, int, int)`` tuple or raise ValueError."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Join a version tuple back into its "x.y.z" string form."""
    return ".".join(str(v) for v in version_tuple)
14
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Return ``base ** exponent % modulo_value`` by recursive squaring.

    Recursion depth is O(log exponent): even exponents halve, odd ones
    decrement once and then halve.
    """
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Project Euler 188: last `digits` digits of the tetration base↑↑height.

    Iterates the tower bottom-up, keeping only the last `digits` digits at
    every step (working mod 10**digits).
    """
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
273
0
import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :int = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'} SCREAMING_SNAKE_CASE :Dict = { 'vocab_file': { 'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt', }, 'emoji_file': { 'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json', }, } SCREAMING_SNAKE_CASE :Dict = { 'abeja/gpt-neox-japanese-2.7b': 2048, } def UpperCAmelCase ( a_ , a_ ) -> Any: """simple docstring""" with open(a_ , "r" , encoding="utf-8" ) as f: __A = json.loads(f.read() ) __A = collections.OrderedDict() __A = collections.OrderedDict() __A = collections.OrderedDict() with open(a_ , "r" , encoding="utf-8" ) as f: __A = f.readlines() __A = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token] for idx, b in enumerate(a_ ): __A = b __A = idx for wd in b: __A = idx return vocab, raw_vocab, ids_to_tokens, emoji class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ["input_ids", "attention_mask"] def __init__( self : str ,A : int ,A : Union[str, Any] ,A : List[str]="<|endoftext|>" ,A : int="<|endoftext|>" ,A : Any="<|startoftext|>" ,A : Dict="<|endoftext|>" ,A : Optional[Any]=False ,**A : str ,): super().__init__( unk_token=A ,pad_token=A ,bos_token=A ,eos_token=A ,do_clean_text=A ,**A ,) if not os.path.isfile(A ): raise ValueError( f'''Can\'t find a vocabulary file at path \'{vocab_file}\'. 
To load the vocabulary from a Google pretrained''' " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) if not os.path.isfile(A ): raise ValueError( f'''Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google''' " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) __A = do_clean_text __A , __A , __A , __A = load_vocab_and_emoji(A ,A ) __A = SubWordJapaneseTokenizer( vocab=self.vocab ,ids_to_tokens=self.ids_to_tokens ,emoji=self.emoji ) @property def UpperCamelCase_ ( self : Any ): # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab return len(self.raw_vocab ) def UpperCamelCase_ ( self : List[Any] ): return dict(self.raw_vocab ,**self.added_tokens_encoder ) def UpperCamelCase_ ( self : Tuple ,A : Optional[int] ): return self.subword_tokenizer.tokenize(A ,clean=self.do_clean_text ) def UpperCamelCase_ ( self : List[Any] ,A : Optional[Any] ): return self.vocab.get(A ,self.vocab.get(self.unk_token ) ) def UpperCamelCase_ ( self : str ,A : List[str] ): return self.subword_tokenizer.convert_id_to_token(A ) def UpperCamelCase_ ( self : Optional[Any] ,A : str ): __A = "".join(A ).strip() return out_string def UpperCamelCase_ ( self : Optional[int] ,A : "Conversation" ): __A = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(A ,add_special_tokens=A ) + [self.eos_token_id] ) if len(A ) > self.model_max_length: __A = input_ids[-self.model_max_length :] return input_ids def UpperCamelCase_ ( self : Any ,A : str ,A : Optional[str] = None ): __A = 0 if os.path.isdir(A ): __A = os.path.join( A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __A = os.path.join( A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] ) else: __A = ( (filename_prefix + "-" if filename_prefix else "") + save_directory 
+ VOCAB_FILES_NAMES["vocab_file"] ) __A = ( (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"] ) with open(A ,"w" ,encoding="utf-8" ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' " Please check that the vocabulary is not corrupted!" ) __A = token_index writer.write(",".join(A ) + "\n" ) index += 1 with open(A ,"w" ,encoding="utf-8" ) as writer: json.dump(self.emoji ,A ) return vocab_file, emoji_file class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : Union[str, Any] ,A : int ,A : Union[str, Any] ,A : Optional[Any] ): __A = vocab # same as swe __A = ids_to_tokens # same as bpe __A = emoji __A = np.max([len(A ) for w in self.vocab.keys()] ) __A = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" ) __A = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" ) __A = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" ) __A = re.compile( R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" ) __A = re.compile( R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" ) __A = re.compile( R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" ) __A = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿" __A = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟" __A = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} ) def __len__( self : 
Union[str, Any] ): return len(self.ids_to_tokens ) def UpperCamelCase_ ( self : Union[str, Any] ,A : Any ): __A = self.content_repattera.sub("<URL>" ,A ) __A = self.content_repattera.sub("<EMAIL>" ,A ) __A = self.content_repattera.sub("<TEL>" ,A ) __A = self.content_repattera.sub("<DATE>" ,A ) __A = self.content_repattera.sub("<DATE>" ,A ) __A = self.content_repattera.sub("<PRICE>" ,A ) __A = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: __A = content.replace("<BLOCK><BLOCK>" ,"<BLOCK>" ) return content def UpperCamelCase_ ( self : str ,A : List[str] ,A : Optional[Any]=False ): __A = text.replace(" " ,"<SP>" ) __A = text.replace(" " ,"<SP>" ) __A = text.replace("\r\n" ,"<BR>" ) __A = text.replace("\n" ,"<BR>" ) __A = text.replace("\r" ,"<BR>" ) __A = text.replace("\t" ,"<TAB>" ) __A = text.replace("—" ,"ー" ) __A = text.replace("−" ,"ー" ) for k, v in self.emoji["emoji"].items(): if k in text: __A = text.replace(A ,A ) if clean: __A = self.clean_text(A ) def check_simbol(A : str ): __A = x.encode() if len(A ) == 1 and len(A ) == 2: __A = (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0xc2a1 and c <= 0xc2bf) or (c >= 0xc780 and c <= 0xc783) or (c >= 0xcab9 and c <= 0xcbbf) or (c >= 0xcc80 and c <= 0xcda2) ): return True return False def checkuae(A : str ): __A = x.encode() if len(A ) == 1 and len(A ) == 3: __A = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] ) if c >= 0xe28080 and c <= 0xe2b07f: return True return False __A = 0 __A = [] while pos < len(A ): __A = min(len(A ) ,pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3 __A = [] # (token_id, token, pos) for e in range(A ,A ,-1 ): __A = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(A ) > 2: __A = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(A ) > 0: # the smallest token_id is adopted __A , __A , __A = sorted(A ,key=lambda A : x[0] )[0] result.append(A ) __A = e else: __A = pos + 1 __A = text[pos:end] if check_simbol(A ): 
result.append("<KIGOU>" ) elif checkuae(A ): result.append("<U2000U2BFF>" ) else: for i in wd.encode("utf-8" ): result.append("<|byte%d|>" % i ) __A = end return result def UpperCamelCase_ ( self : Optional[Any] ,A : List[str] ,A : Any="\n" ): __A = [] __A = [] __A = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(A ) > 0: words.append(bytearray(A ).decode("utf-8" ,errors="replace" ) ) __A = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji["emoji_inv"][word] ) elif word == "<SP>": words.append(" " ) elif word == "<BR>": words.append(A ) elif word == "<TAB>": words.append("\t" ) elif word == "<BLOCK>": words.append("▀" ) elif word == "<KIGOU>": words.append("ǀ" ) elif word == "<U2000U2BFF>": words.append("‖" ) else: words.append(A ) if len(A ) > 0: words.append(bytearray(A ).decode("utf-8" ,errors="replace" ) ) __A = "".join(A ) return text
15
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging


if TYPE_CHECKING:
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase


logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}


class LongformerConfig(PretrainedConfig):
    r"""
    Configuration class for a Longformer model.

    `attention_window` is the size of the local attention window; it may be a
    single int (shared by every layer) or one int per layer.
    """

    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        """Constructs LongformerConfig."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        # Signal the model to use the ONNX-exportable attention path.
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            # NOTE(review): assignment target reconstructed from the upstream
            # file — the pooled output only carries a batch axis.
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        """What absolute tolerance values to use during model conversion validation."""
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
273
0
"""simple docstring""" import sys import webbrowser import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": print('Googling.....') lowerCAmelCase_ = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:]) lowerCAmelCase_ = requests.get(url, headers={'UserAgent': UserAgent().random}) # res.raise_for_status() with open('project1a.html', 'wb') as out_file: # only for knowing the class for data in res.iter_content(10_000): out_file.write(data) lowerCAmelCase_ = BeautifulSoup(res.text, 'html.parser') lowerCAmelCase_ = list(soup.select('.eZt8xd'))[:5] print(len(links)) for link in links: if link.text == "Maps": webbrowser.open(link.get('href')) else: webbrowser.open(F'''https://google.com{link.get("href")}''')
16
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class KarrasVePipeline(DiffusionPipeline):
    r"""
    Stochastic sampling from Karras et al. [1] tailored to variance-expanding
    (VE) models: Algorithm 2 with the VE column of Table 1 from the paper.

    Parameters:
        unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
        scheduler ([`KarrasVeScheduler`]): Scheduler driving the sampling loop.
    """

    # add type hints for linting
    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        r"""
        Generate `batch_size` images by running the Karras-VE sampling loop.

        Args:
            batch_size: number of images to generate.
            num_inference_steps: number of denoising steps.
            generator: torch generator(s) for deterministic sampling.
            output_type: "pil" for PIL images, anything else returns the numpy array.
            return_dict: wrap the result in an [`ImagePipelineOutput`].
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        # Map from [-1, 1] back to [0, 1] and move channels last for conversion.
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
273
0
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Union[str, Any] ): __lowercase = tempfile.mkdtemp() __lowercase = BlipImageProcessor() __lowercase = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" ) __lowercase = BlipProcessor(UpperCAmelCase__, UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) def _lowercase ( self : Tuple, **UpperCAmelCase__ : Any ): return AutoProcessor.from_pretrained(self.tmpdirname, **UpperCAmelCase__ ).tokenizer def _lowercase ( self : List[str], **UpperCAmelCase__ : Tuple ): return AutoProcessor.from_pretrained(self.tmpdirname, **UpperCAmelCase__ ).image_processor def _lowercase ( self : Dict ): shutil.rmtree(self.tmpdirname ) def _lowercase ( self : Dict ): __lowercase = [np.random.randint(2_5_5, size=(3, 3_0, 4_0_0), dtype=np.uinta )] __lowercase = [Image.fromarray(np.moveaxis(UpperCAmelCase__, 0, -1 ) ) for x in image_inputs] return image_inputs def _lowercase ( self : List[Any] ): __lowercase = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowercase = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)" ) __lowercase = self.get_image_processor(do_normalize=UpperCAmelCase__, padding_value=1.0 ) __lowercase = BlipProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=UpperCAmelCase__, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer, UpperCAmelCase__ ) 
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor, UpperCAmelCase__ ) def _lowercase ( self : Dict ): __lowercase = self.get_image_processor() __lowercase = self.get_tokenizer() __lowercase = BlipProcessor(tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ ) __lowercase = self.prepare_image_inputs() __lowercase = image_processor(UpperCAmelCase__, return_tensors="np" ) __lowercase = processor(images=UpperCAmelCase__, return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2 ) def _lowercase ( self : Optional[int] ): __lowercase = self.get_image_processor() __lowercase = self.get_tokenizer() __lowercase = BlipProcessor(tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ ) __lowercase = "lower newer" __lowercase = processor(text=UpperCAmelCase__ ) __lowercase = tokenizer(UpperCAmelCase__, return_token_type_ids=UpperCAmelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key] ) def _lowercase ( self : Tuple ): __lowercase = self.get_image_processor() __lowercase = self.get_tokenizer() __lowercase = BlipProcessor(tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ ) __lowercase = "lower newer" __lowercase = self.prepare_image_inputs() __lowercase = processor(text=UpperCAmelCase__, images=UpperCAmelCase__ ) self.assertListEqual(list(inputs.keys() ), ["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase__ ): processor() def _lowercase ( self : List[str] ): __lowercase = self.get_image_processor() __lowercase = self.get_tokenizer() __lowercase = BlipProcessor(tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ ) __lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowercase = 
processor.batch_decode(UpperCAmelCase__ ) __lowercase = tokenizer.batch_decode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__, UpperCAmelCase__ ) def _lowercase ( self : Optional[int] ): __lowercase = self.get_image_processor() __lowercase = self.get_tokenizer() __lowercase = BlipProcessor(tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ ) __lowercase = "lower newer" __lowercase = self.prepare_image_inputs() __lowercase = processor(text=UpperCAmelCase__, images=UpperCAmelCase__ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ), ["pixel_values", "input_ids", "attention_mask"] )
17
import itertools import random import unittest import numpy as np from transformers import BatchFeature, SpeechTaFeatureExtractor from transformers.testing_utils import require_torch from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch __A : str = random.Random() def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__=1.0 , UpperCamelCase__=None , UpperCamelCase__=None ) -> Tuple: '''simple docstring''' if rng is None: UpperCAmelCase = global_rng UpperCAmelCase = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch class A_ (unittest.TestCase ): def __init__( self , _A , _A=7 , _A=4_0_0 , _A=2_0_0_0 , _A=1 , _A=0.0 , _A=1_6_0_0_0 , _A=True , _A=8_0 , _A=1_6 , _A=6_4 , _A="hann_window" , _A=8_0 , _A=7_6_0_0 , _A=1E-10 , _A=True , ): '''simple docstring''' UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = min_seq_length UpperCAmelCase = max_seq_length UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) UpperCAmelCase = feature_size UpperCAmelCase = padding_value UpperCAmelCase = sampling_rate UpperCAmelCase = do_normalize UpperCAmelCase = num_mel_bins UpperCAmelCase = hop_length UpperCAmelCase = win_length UpperCAmelCase = win_function UpperCAmelCase = fmin UpperCAmelCase = fmax UpperCAmelCase = mel_floor UpperCAmelCase = return_attention_mask def _lowercase ( self ): '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "do_normalize": self.do_normalize, "num_mel_bins": self.num_mel_bins, "hop_length": self.hop_length, "win_length": self.win_length, "win_function": self.win_function, "fmin": self.fmin, "fmax": self.fmax, "mel_floor": self.mel_floor, "return_attention_mask": 
self.return_attention_mask, } def _lowercase ( self , _A=False , _A=False ): '''simple docstring''' def _flatten(_A ): return list(itertools.chain(*_A ) ) if equal_length: UpperCAmelCase = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size UpperCAmelCase = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: UpperCAmelCase = [np.asarray(_A ) for x in speech_inputs] return speech_inputs def _lowercase ( self , _A=False , _A=False ): '''simple docstring''' if equal_length: UpperCAmelCase = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size UpperCAmelCase = [ floats_list((x, self.num_mel_bins) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: UpperCAmelCase = [np.asarray(_A ) for x in speech_inputs] return speech_inputs @require_torch class A_ (a_ , unittest.TestCase ): UpperCAmelCase__ = SpeechTaFeatureExtractor def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = SpeechTaFeatureExtractionTester(self ) def _lowercase ( self , _A ): '''simple docstring''' self.assertTrue(np.all(np.mean(_A , axis=0 ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(_A , axis=0 ) - 1 ) < 1E-3 ) ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )] UpperCAmelCase = [np.asarray(_A ) for speech_input in speech_inputs] # Test not batched input UpperCAmelCase = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values UpperCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) ) # Test 
batched UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_A , _A ): self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )] UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad'''] UpperCAmelCase = [None, 1_6_0_0, None] for max_length, padding in zip(_A , _A ): UpperCAmelCase = feat_extract(_A , padding=_A , max_length=_A , return_tensors='''np''' ) UpperCAmelCase = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:8_0_0] ) self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] ) self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCAmelCase = range(8_0_0 , 1_4_0_0 , 2_0_0 ) UpperCAmelCase = [floats_list((1, x) )[0] for x in lengths] UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad'''] UpperCAmelCase = [None, 1_6_0_0, None] for max_length, padding in zip(_A , _A ): UpperCAmelCase = feat_extract(_A , max_length=_A , padding=_A ) UpperCAmelCase = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:8_0_0] ) self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] ) self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 
, 2_0_0 )] UpperCAmelCase = feat_extract( _A , truncation=_A , max_length=1_0_0_0 , padding='''max_length''' , return_tensors='''np''' ) UpperCAmelCase = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :8_0_0] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )] UpperCAmelCase = feat_extract( _A , truncation=_A , max_length=1_0_0_0 , padding='''longest''' , return_tensors='''np''' ) UpperCAmelCase = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :8_0_0] ) self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1_0_0_0) ) UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )] UpperCAmelCase = feat_extract( _A , truncation=_A , max_length=2_0_0_0 , padding='''longest''' , return_tensors='''np''' ) UpperCAmelCase = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :8_0_0] ) self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 1_2_0_0) ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCAmelCase = np.random.rand(1_0_0 ).astype(np.floataa ) UpperCAmelCase = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: UpperCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' ) 
self.assertTrue(np_processed.input_values.dtype == np.floataa ) UpperCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )] UpperCAmelCase = [np.asarray(_A ) for speech_input in speech_inputs] # Test feature size UpperCAmelCase = feature_extractor(audio_target=_A , padding=_A , return_tensors='''np''' ).input_values self.assertTrue(input_values.ndim == 3 ) self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins ) # Test not batched input UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) ) # Test batched UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_A , _A ): self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. 
UpperCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)] UpperCAmelCase = np.asarray(_A ) UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_A , _A ): self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target() UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) UpperCAmelCase = feat_extract.model_input_names[0] UpperCAmelCase = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(_A ) == len(_A ) for x, y in zip(_A , processed_features[input_name] ) ) ) UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_A ) UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' ) UpperCAmelCase = processed_features[input_name] if len(batch_features_input.shape ) < 3: UpperCAmelCase = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_A ) UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) UpperCAmelCase = feat_extract.model_input_names[0] UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' ) UpperCAmelCase = processed_features[input_name] if len(batch_features_input.shape ) < 3: UpperCAmelCase = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = 
self.feature_extraction_class(**self.feat_extract_dict ) UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target() UpperCAmelCase = feat_extract.model_input_names[0] UpperCAmelCase = BatchFeature({input_name: speech_inputs} ) UpperCAmelCase = feat_extract.num_mel_bins # hack! UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )[input_name] UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''pt''' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.feat_extract_dict UpperCAmelCase = True UpperCAmelCase = self.feature_extraction_class(**_A ) UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target() UpperCAmelCase = [len(_A ) for x in speech_inputs] UpperCAmelCase = feat_extract.model_input_names[0] UpperCAmelCase = BatchFeature({input_name: speech_inputs} ) UpperCAmelCase = feat_extract.num_mel_bins # hack! UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' ) self.assertIn('''attention_mask''' , _A ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _A ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.feat_extract_dict UpperCAmelCase = True UpperCAmelCase = self.feature_extraction_class(**_A ) UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target() UpperCAmelCase = [len(_A ) for x in speech_inputs] UpperCAmelCase = feat_extract.model_input_names[0] UpperCAmelCase = BatchFeature({input_name: speech_inputs} ) UpperCAmelCase = min(_A ) UpperCAmelCase = feat_extract.num_mel_bins # hack! 
UpperCAmelCase = feat_extract.pad( _A , padding='''max_length''' , max_length=_A , truncation=_A , return_tensors='''np''' ) self.assertIn('''attention_mask''' , _A ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] ) def _lowercase ( self , _A ): '''simple docstring''' from datasets import load_dataset UpperCAmelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech UpperCAmelCase = ds.sort('''id''' ).select(range(_A ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = torch.tensor( [2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03, 3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03, 2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04, 4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03, 7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04, 4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] ) # fmt: on UpperCAmelCase = self._load_datasamples(1 ) UpperCAmelCase = SpeechTaFeatureExtractor() UpperCAmelCase = feature_extractor(_A , return_tensors='''pt''' ).input_values self.assertEquals(input_values.shape , (1, 9_3_6_8_0) ) self.assertTrue(torch.allclose(input_values[0, :3_0] , _A , atol=1E-6 ) ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = torch.tensor( [-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77, -3.15_20, -2.94_35, -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86, -3.08_64, -3.12_91, -3.23_53, -2.74_44, -2.68_31, -2.72_87, -3.17_61, -3.15_71, -3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] ) # fmt: on UpperCAmelCase = self._load_datasamples(1 ) UpperCAmelCase = 
SpeechTaFeatureExtractor() UpperCAmelCase = feature_extractor(audio_target=_A , return_tensors='''pt''' ).input_values self.assertEquals(input_values.shape , (1, 3_6_6, 8_0) ) self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , _A , atol=1E-4 ) )
273
0
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class a__ : def __init__( self : List[Any],_A : Dict,_A : List[Any]=3,_A : Optional[int]=32,_A : str=3,_A : Optional[int]=10,_A : int=[10, 20, 30, 40],_A : str=[1, 1, 2, 1],_A : Tuple=True,_A : List[Any]=True,_A : int="relu",_A : List[Any]=3,_A : Dict=None,): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = parent SCREAMING_SNAKE_CASE_ : Tuple = batch_size SCREAMING_SNAKE_CASE_ : Optional[int] = image_size SCREAMING_SNAKE_CASE_ : int = num_channels SCREAMING_SNAKE_CASE_ : Optional[Any] = embeddings_size SCREAMING_SNAKE_CASE_ : int = hidden_sizes SCREAMING_SNAKE_CASE_ : Dict = depths SCREAMING_SNAKE_CASE_ : int = is_training SCREAMING_SNAKE_CASE_ : Union[str, Any] = use_labels SCREAMING_SNAKE_CASE_ : Dict = hidden_act SCREAMING_SNAKE_CASE_ : Tuple = num_labels SCREAMING_SNAKE_CASE_ : int = scope SCREAMING_SNAKE_CASE_ : Optional[Any] = len(_A ) def __UpperCamelCase ( self : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE_ : Dict = None if self.use_labels: SCREAMING_SNAKE_CASE_ : Any = ids_tensor([self.batch_size],self.num_labels ) SCREAMING_SNAKE_CASE_ 
: Dict = self.get_config() return config, pixel_values, labels def __UpperCamelCase ( self : Optional[Any] ): """simple docstring""" return ResNetConfig( num_channels=self.num_channels,embeddings_size=self.embeddings_size,hidden_sizes=self.hidden_sizes,depths=self.depths,hidden_act=self.hidden_act,num_labels=self.num_labels,image_size=self.image_size,) def __UpperCamelCase ( self : Optional[Any],_A : int,_A : Tuple,_A : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = TFResNetModel(config=_A ) SCREAMING_SNAKE_CASE_ : str = model(_A ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),) def __UpperCamelCase ( self : Dict,_A : int,_A : Optional[Any],_A : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = self.num_labels SCREAMING_SNAKE_CASE_ : str = TFResNetForImageClassification(_A ) SCREAMING_SNAKE_CASE_ : List[str] = model(_A,labels=_A ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) ) def __UpperCamelCase ( self : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = config_and_inputs SCREAMING_SNAKE_CASE_ : Dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class a__ ( A__ , A__ , unittest.TestCase ): A = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () A = ( {'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification} if is_tf_available() else {} ) A = False A = False A = False A = False A = False def __UpperCamelCase ( self : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = TFResNetModelTester(self ) SCREAMING_SNAKE_CASE_ : List[Any] = ConfigTester(self,config_class=_A,has_text_modality=_A ) def 
__UpperCamelCase ( self : str ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __UpperCamelCase ( self : Union[str, Any] ): """simple docstring""" return @unittest.skip(reason="ResNet does not use inputs_embeds" ) def __UpperCamelCase ( self : int ): """simple docstring""" pass @unittest.skip(reason="ResNet does not support input and output embeddings" ) def __UpperCamelCase ( self : List[str] ): """simple docstring""" pass def __UpperCamelCase ( self : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : Tuple = model_class(_A ) SCREAMING_SNAKE_CASE_ : Any = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE_ : List[Any] = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE_ : List[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1],_A ) def __UpperCamelCase ( self : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def __UpperCamelCase ( self : Optional[int] ): """simple docstring""" def check_hidden_states_output(_A : int,_A : Tuple,_A : str ): SCREAMING_SNAKE_CASE_ : List[str] = model_class(_A ) SCREAMING_SNAKE_CASE_ : Dict = model(**self._prepare_for_class(_A,_A ) ) SCREAMING_SNAKE_CASE_ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states SCREAMING_SNAKE_CASE_ : Union[str, Any] = 
self.model_tester.num_stages self.assertEqual(len(_A ),expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ),[self.model_tester.image_size // 4, self.model_tester.image_size // 4],) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ : List[str] = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: SCREAMING_SNAKE_CASE_ : List[str] = layer_type SCREAMING_SNAKE_CASE_ : str = True check_hidden_states_output(_A,_A,_A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE_ : Tuple = True check_hidden_states_output(_A,_A,_A ) def __UpperCamelCase ( self : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) @slow def __UpperCamelCase ( self : Optional[Any] ): """simple docstring""" for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ : List[str] = TFResNetModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def _snake_case ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class a__ ( unittest.TestCase ): @cached_property def __UpperCamelCase ( self : Optional[int] ): """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __UpperCamelCase ( self : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) SCREAMING_SNAKE_CASE_ : int = self.default_image_processor SCREAMING_SNAKE_CASE_ : Dict = 
prepare_img() SCREAMING_SNAKE_CASE_ : int = image_processor(images=_A,return_tensors="tf" ) # forward pass SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(**_A ) # verify the logits SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape,_A ) SCREAMING_SNAKE_CASE_ : Tuple = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(),_A,atol=1E-4 ) )
18
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


# Map of submodule name -> public symbols it exports. Consumed by _LazyModule so
# that heavy optional dependencies (torch / tf / sentencepiece / torchaudio) are
# only imported when one of their symbols is first accessed.
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Tokenizer requires sentencepiece; silently omit it when unavailable.
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; names must match the strings in
    # _import_structure above so both paths expose the same public API.
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy so submodules are loaded on
    # first attribute access instead of at package-import time.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
273
0
from __future__ import annotations from math import pi, sqrt def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ): if inductance <= 0: raise ValueError("Inductance cannot be 0 or negative" ) elif capacitance <= 0: raise ValueError("Capacitance cannot be 0 or negative" ) else: return ( "Resonant frequency", float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ), ) if __name__ == "__main__": import doctest doctest.testmod()
19
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first ``length`` hexagonal numbers, h(n) = n * (2n - 1).

    Args:
        length: how many hexagonal numbers to generate; must be a positive int.

    Returns:
        ``[h(0), h(1), ..., h(length - 1)]`` starting at h(0) = 0.

    Raises:
        ValueError: if ``length`` is not a positive integer.

    >>> hexagonal_numbers(5)
    [0, 1, 6, 15, 28]
    """
    # The isinstance check must come first: comparing a non-numeric value with
    # 0 would raise TypeError before we could report the intended ValueError.
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


# Backward-compatible alias for the previous (obfuscated) public name.
__SCREAMING_SNAKE_CASE = hexagonal_numbers

if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
273
0
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    """Return the real (active) power P = S * pf of an AC load.

    Args:
        apparent_power: apparent power S in volt-amperes.
        power_factor: dimensionless power factor, in [-1, 1].

    Returns:
        Real power in watts.

    Raises:
        ValueError: if ``power_factor`` is not a number in [-1, 1].
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Return the reactive power Q = S * sqrt(1 - pf**2) of an AC load.

    Args:
        apparent_power: apparent power S in volt-amperes.
        power_factor: dimensionless power factor, in [-1, 1].

    Returns:
        Reactive power in volt-amperes reactive (VAR).

    Raises:
        ValueError: if ``power_factor`` is not a number in [-1, 1].
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    # sqrt(1 - pf^2) is the sine of the phase angle whose cosine is the power factor.
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
20
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , ) @pytest.mark.usefixtures('''sm_env''' ) @parameterized_class( [ { '''framework''': '''pytorch''', '''script''': '''run_glue.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.g4dn.xlarge''', '''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9}, }, { '''framework''': '''tensorflow''', '''script''': '''run_tf.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.g4dn.xlarge''', '''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9}, }, ] ) class A_ (unittest.TestCase ): def _lowercase ( self ): '''simple docstring''' if self.framework == "pytorch": subprocess.run( F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , ) assert hasattr(self , '''env''' ) def _lowercase ( self , _A=1 ): '''simple docstring''' return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , ) def _lowercase ( self , _A ): '''simple docstring''' TrainingJobAnalytics(_A 
).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.create_estimator() # run training estimator.fit() # result dataframe UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping UpperCAmelCase = ( Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert all(t <= self.results['''eval_loss'''] for t in eval_loss ) # dump tests result into json file to share in PR with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
273
0
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    """Normalize the `videos` argument into a batch of videos (list of lists of frames).

    NOTE(review): the original signature named the parameter differently from the body,
    which referenced an undefined `videos`; the name is restored here (the class below
    already calls this function as `make_batched`).
    """
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        # Already a batch of videos.
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        # A single video (list of frames) -> wrap into a batch of one.
        return [videos]

    elif is_valid_image(videos):
        # A single frame -> one-frame video inside a one-video batch.
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")


class _lowerCamelCase(BaseImageProcessor):
    """Video image processor: resize, center-crop, rescale (with optional zero-centering
    offset) and normalize video frames, returning a `BatchFeature` of pixel values.

    NOTE(review): the original used duplicate parameter names throughout (a SyntaxError)
    and an undefined base-class name; parameter/method names below are restored from the
    internal call sites (`self.resize`, `self._preprocess_image`, ...), and the base class
    is the imported `BaseImageProcessor`.
    """

    # Attribute expected by BaseImageProcessor; the original attribute name was garbled.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: "PILImageResampling" = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: "PILImageResampling" = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image`; `size` may give a `shortest_edge` or explicit `height`/`width`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to `size['height']` x `size['width']`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by `scale`; with `offset`, subtract scale/2 beforehand."""
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` with the given per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: "PILImageResampling" = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        offset: Optional[bool] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Validate options and apply the configured transforms to a single frame."""
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: "PILImageResampling" = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        offset: Optional[bool] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> "PIL.Image.Image":
        """Preprocess one or more videos; per-call options override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
21
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): the original bound the logger and this map to the same (garbled) name,
# so the logger was immediately overwritten; they get distinct names here.
BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    """Configuration for BigBird models.

    NOTE(review): the original file defined two classes with the same garbled name (the
    second shadowed the first) and inherited from an undefined name; the imported
    `PretrainedConfig` is the evident base, and parameter names are restored from the
    attribute assignments in the body.
    """

    # Registration key used by the auto classes / config mapping.
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        # Token ids are handled by the base class.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """ONNX export input spec; axis 1 is `choice` for multiple-choice tasks."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
273
0
"""Benchmark iterating over a datasets.Dataset row-by-row, in batches, and in each
supported output format, before and after shuffling; results are dumped as JSON."""

import json
import os
import tempfile

import datasets

from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length):
    """Row-by-row __getitem__ over the first `length` examples."""
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    """Sliced __getitem__ over the whole dataset in `batch_size` chunks."""
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    """Row-by-row access with the given output format ('numpy', 'pandas', ...)."""
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    """Batched access with the given output format."""
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    """Generate a synthetic dataset, time every access pattern, and write the timings.

    NOTE(review): the original (garbled) version assigned every timing to the same
    temporary name, discarding the results; they are collected into `times` here so
    the JSON dump actually contains the measurements.
    """
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    # A reduced matrix for the post-shuffle pass.
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]

    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )

        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
22
from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class A_ : def __init__( self , _A , _A=1_3 , _A=3_0 , _A=2 , _A=3 , _A=True , _A=True , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1_0 , _A=0.02 , _A=3 , _A=None , ): '''simple docstring''' UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = image_size UpperCAmelCase = patch_size UpperCAmelCase = num_channels UpperCAmelCase = is_training UpperCAmelCase = use_labels UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = type_sequence_label_size UpperCAmelCase = initializer_range UpperCAmelCase = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase = (image_size // patch_size) ** 2 UpperCAmelCase = num_patches + 1 def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase = None if self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase = self.get_config() return config, pixel_values, labels def _lowercase ( self ): '''simple docstring''' return ViTConfig( 
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , ) def _lowercase ( self , _A , _A , _A ): '''simple docstring''' UpperCAmelCase = TFViTModel(config=_A ) UpperCAmelCase = model(_A , training=_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. UpperCAmelCase = self.image_size // 2 UpperCAmelCase = pixel_values[:, :, :image_size, :image_size] UpperCAmelCase = model(_A , interpolate_pos_encoding=_A , training=_A ) UpperCAmelCase = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def _lowercase ( self , _A , _A , _A ): '''simple docstring''' UpperCAmelCase = self.type_sequence_label_size UpperCAmelCase = TFViTForImageClassification(_A ) UpperCAmelCase = model(_A , labels=_A , training=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. 
UpperCAmelCase = self.image_size // 2 UpperCAmelCase = pixel_values[:, :, :image_size, :image_size] UpperCAmelCase = model(_A , interpolate_pos_encoding=_A , training=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase = 1 UpperCAmelCase = TFViTForImageClassification(_A ) UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs UpperCAmelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class A_ (a_ , a_ , unittest.TestCase ): UpperCAmelCase__ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () UpperCAmelCase__ = ( {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification} if is_tf_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = TFViTModelTester(self ) UpperCAmelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 ) def _lowercase ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def _lowercase ( self ): '''simple docstring''' pass @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def _lowercase ( self ): '''simple docstring''' pass def _lowercase ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase = model_class(_A ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) UpperCAmelCase = 
model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase = model_class(_A ) UpperCAmelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase = [*signature.parameters.keys()] UpperCAmelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _A ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) @slow def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(_A ) def __SCREAMING_SNAKE_CASE ( ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class A_ (unittest.TestCase ): @cached_property def _lowercase ( self ): '''simple docstring''' return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None @slow def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ) UpperCAmelCase = self.default_image_processor UpperCAmelCase = prepare_img() UpperCAmelCase = image_processor(images=_A , return_tensors='''tf''' ) # forward pass UpperCAmelCase = model(**_A ) # verify the logits UpperCAmelCase = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , _A ) UpperCAmelCase = tf.constant([-0.27_44, 0.82_15, -0.08_36] ) 
tf.debugging.assert_near(outputs.logits[0, :3] , _A , atol=1E-4 )
273
0
'''simple docstring''' from typing import Optional from urllib.parse import quote import huggingface_hub as hfh from packaging import version def snake_case_ ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ) -> str: if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release: # old versions of hfh don't url-encode the file path UpperCAmelCase : Tuple = quote(_lowerCAmelCase ) return hfh.hf_hub_url(_lowerCAmelCase , _lowerCAmelCase , repo_type='''dataset''' , revision=_lowerCAmelCase )
23
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class A_(unittest.TestCase):
    """Integration tests comparing XLM-R hidden states against reference fairseq values.

    NOTE(review): the original named both tests identically, so the second definition
    shadowed the first and unittest discovered neither; `test_*` names restore discovery.
    """

    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        # The dog is cute and lives in the garden house
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        # The dog is cute and lives in the garden house
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
273
0
import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def lowerCamelCase__ ( ) -> List[Any]: with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(snake_case_ ): requests.request('''GET''' , '''https://huggingface.co''' ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 ) @pytest.mark.integration def lowerCamelCase__ ( ) -> Union[str, Any]: with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request('''GET''' , '''https://huggingface.co''' ) def lowerCamelCase__ ( ) -> Any: with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(snake_case_ ): http_head('''https://huggingface.co''' )
24
import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") __A : Optional[int] = logging.getLogger(__name__) @dataclass class A_ : UpperCAmelCase__ = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) UpperCAmelCase__ = field( default=a_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCAmelCase__ = field( default=a_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCAmelCase__ = field( default=a_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) UpperCAmelCase__ = field( default=a_ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , ) UpperCAmelCase__ = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCAmelCase__ = field( default=a_ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) @dataclass class A_ : UpperCAmelCase__ = field(default=a_ , metadata={'''help''': '''The input training data 
file (a text file).'''} ) UpperCAmelCase__ = field( default=a_ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , ) UpperCAmelCase__ = field( default=a_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) UpperCAmelCase__ = field( default=a_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) UpperCAmelCase__ = field( default=a_ , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. If passed, sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCAmelCase__ = field( default=a_ , metadata={ '''help''': ( '''Whether to pad all samples to the maximum sentence length. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch. More ''' '''efficient on GPU but very bad for TPU.''' ) } , ) UpperCAmelCase__ = field( default=a_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCAmelCase__ = field( default=a_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def _lowercase ( self ): '''simple docstring''' if self.train_file is not None: UpperCAmelCase = self.train_file.split('''.''' )[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: UpperCAmelCase = self.validation_file.split('''.''' )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." 
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that dynamically pads the inputs for multiple choice.

    Each incoming feature holds per-choice lists (``input_ids`` etc.) plus a
    ``label``/``labels`` entry; the collator flattens choices, pads with the
    tokenizer, then un-flattens to ``(batch, num_choices, seq_len)``.
    """

    # String annotations keep the project types out of runtime evaluation.
    tokenizer: "PreTrainedTokenizerBase"
    padding: "Union[bool, str, PaddingStrategy]" = True
    max_length: "Optional[int]" = None
    pad_to_multiple_of: "Optional[int]" = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        # Flatten: one padded entry per (example, choice) pair.
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten back to (batch, num_choices, seq_len).
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # BUG FIX: original said `torch.intaa`, which does not exist; labels are int64.
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch


def main():
    """Fine-tune a multiple-choice model (e.g. on SWAG). Script entry point."""
    # See all possible arguments in src/transformers/training_args.py or run with --help.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: either user-provided CSV/JSON files or the public `swag` dataset from the Hub.
    # In distributed training, load_dataset guarantees only one local process downloads the data.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        # NOTE(review): assumes train_file is set whenever custom files are used;
        # passing only --validation_file would fail here — TODO confirm intended.
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )

    # Load pretrained config/tokenizer/model. The .from_pretrained methods guarantee that
    # only one local process can concurrently download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            # NOTE(review): warning text mentions `--block_size`, which this script
            # does not expose — looks copied from the CLM example; kept verbatim.
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        # Repeat each context once per ending and pair it with "<question> <ending_i>".
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten back into groups of 4 choices per example.
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs): each spawned process just runs main().
    main()


if __name__ == "__main__":
    main()
273
0
"""simple docstring""" from math import sqrt def lowercase_ ( _snake_case ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 ,int(sqrt(_snake_case ) + 1 ) ,6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase_ ( _snake_case = 10_001 ): SCREAMING_SNAKE_CASE__ : Optional[Any] = 0 SCREAMING_SNAKE_CASE__ : int = 1 while count != nth and number < 3: number += 1 if is_prime(_snake_case ): count += 1 while count != nth: number += 2 if is_prime(_snake_case ): count += 1 return number if __name__ == "__main__": print(f"""{solution() = }""")
25
from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class A_ : UpperCAmelCase__ = MBartConfig UpperCAmelCase__ = {} UpperCAmelCase__ = '''gelu''' def __init__( self , _A , _A=1_3 , _A=7 , _A=True , _A=False , _A=9_9 , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A=0.1 , _A=0.1 , _A=2_0 , _A=2 , _A=1 , _A=0 , ): '''simple docstring''' UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = seq_length UpperCAmelCase = is_training UpperCAmelCase = use_labels UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = max_position_embeddings UpperCAmelCase = eos_token_id UpperCAmelCase = pad_token_id UpperCAmelCase = bos_token_id def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , 
decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) UpperCAmelCase = prepare_mbart_inputs_dict(_A , _A , _A ) return config, inputs_dict def _lowercase ( self , _A , _A ): '''simple docstring''' UpperCAmelCase = TFMBartModel(config=_A ).get_decoder() UpperCAmelCase = inputs_dict['''input_ids'''] UpperCAmelCase = input_ids[:1, :] UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :] UpperCAmelCase = inputs_dict['''head_mask'''] UpperCAmelCase = 1 # first forward pass UpperCAmelCase = model(_A , attention_mask=_A , head_mask=_A , use_cache=_A ) UpperCAmelCase , UpperCAmelCase = outputs.to_tuple() UpperCAmelCase = past_key_values[1] def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , ) -> List[str]: '''simple docstring''' if attention_mask is None: UpperCAmelCase = tf.cast(tf.math.not_equal(UpperCamelCase__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCAmelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, 
"decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class A_ (a_ , a_ , unittest.TestCase ): UpperCAmelCase__ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () UpperCAmelCase__ = (TFMBartForConditionalGeneration,) if is_tf_available() else () UpperCAmelCase__ = ( { '''conversational''': TFMBartForConditionalGeneration, '''feature-extraction''': TFMBartModel, '''summarization''': TFMBartForConditionalGeneration, '''text2text-generation''': TFMBartForConditionalGeneration, '''translation''': TFMBartForConditionalGeneration, } if is_tf_available() else {} ) UpperCAmelCase__ = True UpperCAmelCase__ = False UpperCAmelCase__ = False def _lowercase ( self , _A , _A , _A , _A , _A ): '''simple docstring''' if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = TFMBartModelTester(self ) UpperCAmelCase = ConfigTester(self , config_class=_A ) def _lowercase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_A ) @require_sentencepiece @require_tokenizers @require_tf class A_ (unittest.TestCase ): UpperCAmelCase__ = [ ''' UN Chief Says There Is No Military Solution in Syria''', ] UpperCAmelCase__ = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', ] UpperCAmelCase__ = '''facebook/mbart-large-en-ro''' @cached_property def _lowercase ( self ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def _lowercase ( self , **_A ): '''simple docstring''' UpperCAmelCase = self.translate_src_text(**_A ) self.assertListEqual(self.expected_text , _A ) def _lowercase ( self , **_A ): '''simple docstring''' UpperCAmelCase = self.tokenizer(self.src_text , **_A , return_tensors='''tf''' ) UpperCAmelCase = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) UpperCAmelCase = self.tokenizer.batch_decode(_A , skip_special_tokens=_A ) return generated_words @slow def _lowercase ( self ): '''simple docstring''' self._assert_generated_batch_equal_expected()
273
0
import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase ( UpperCamelCase__,unittest.TestCase ): _a = RobertaTokenizer _a = RobertaTokenizerFast _a = True _a = {"cls_token": "<s>"} def a__ ( self ) -> Dict: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _A : Any = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] _A : List[str] = dict(zip(_a , range(len(_a ) ) ) ) _A : Union[str, Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] _A : Optional[int] = {"""unk_token""": """<unk>"""} _A : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) _A : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(_a ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(_a ) ) def a__ ( self , **_a ) -> List[Any]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **_a ) def a__ ( self , **_a ) -> str: kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **_a ) def a__ ( self , _a ) -> Dict: _A : Optional[int] = """lower newer""" _A : int = """lower newer""" return input_text, output_text def a__ ( self ) -> List[str]: _A : Tuple = self.tokenizer_class(self.vocab_file , self.merges_file , 
**self.special_tokens_map ) _A : List[str] = """lower newer""" _A : str = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] _A : str = tokenizer.tokenize(_a ) # , add_prefix_space=True) self.assertListEqual(_a , _a ) _A : Optional[Any] = tokens + [tokenizer.unk_token] _A : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a ) def a__ ( self ) -> List[Any]: _A : Optional[int] = self.get_tokenizer() self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=_a ) , [0, 3_1414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=_a ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , ) @slow def a__ ( self ) -> str: _A : Union[str, Any] = self.tokenizer_class.from_pretrained("""roberta-base""" ) _A : Optional[int] = tokenizer.encode("""sequence builders""" , add_special_tokens=_a ) _A : Union[str, Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_a ) _A : Optional[Any] = tokenizer.encode( """sequence builders""" , add_special_tokens=_a , add_prefix_space=_a ) _A : List[str] = tokenizer.encode( """sequence builders""" , """multi-sequence build""" , add_special_tokens=_a , add_prefix_space=_a ) _A : List[str] = tokenizer.build_inputs_with_special_tokens(_a ) _A : Dict = tokenizer.build_inputs_with_special_tokens(_a , _a ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def a__ ( self ) -> List[Any]: _A : Optional[int] = self.get_tokenizer() _A : List[Any] = """Encode this sequence.""" _A : Any = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]] # Testing encoder arguments _A : Dict = tokenizer.encode(_a , add_special_tokens=_a , add_prefix_space=_a ) _A : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(_a , _a ) _A : int = tokenizer.encode(_a , add_special_tokens=_a , 
add_prefix_space=_a ) _A : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(_a , _a ) tokenizer.add_special_tokens({"""bos_token""": """<s>"""} ) _A : List[Any] = tokenizer.encode(_a , add_special_tokens=_a ) _A : int = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(_a , _a ) # Testing spaces after special tokens _A : Dict = """<mask>""" tokenizer.add_special_tokens( {"""mask_token""": AddedToken(_a , lstrip=_a , rstrip=_a )} ) # mask token has a left space _A : Any = tokenizer.convert_tokens_to_ids(_a ) _A : Union[str, Any] = """Encode <mask> sequence""" _A : Optional[Any] = """Encode <mask>sequence""" _A : List[Any] = tokenizer.encode(_a ) _A : int = encoded.index(_a ) _A : Any = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(_a , _a ) _A : Optional[Any] = tokenizer.encode(_a ) _A : Optional[Any] = encoded.index(_a ) _A : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(_a , _a ) def a__ ( self ) -> Optional[int]: pass def a__ ( self ) -> Tuple: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _A : Any = self.rust_tokenizer_class.from_pretrained(_a , **_a ) _A : int = self.tokenizer_class.from_pretrained(_a , **_a ) _A : Union[str, Any] = """A, <mask> AllenNLP sentence.""" _A : List[Any] = tokenizer_r.encode_plus(_a , add_special_tokens=_a , return_token_type_ids=_a ) _A : Optional[int] = tokenizer_p.encode_plus(_a , add_special_tokens=_a , return_token_type_ids=_a ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) _A : str = 
tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) _A : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual( _a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( _a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) def a__ ( self ) -> Any: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): _A : List[Any] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) _A : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) _A : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , _a ) self.assertEqual(post_processor_state["""add_prefix_space"""] , _a ) self.assertEqual(post_processor_state["""trim_offsets"""] , _a ) def a__ ( self ) -> Union[str, Any]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _A : Optional[int] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` _A : Dict = F'''{text_of_1_token} {text_of_1_token}''' _A : Tuple = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) _A : Optional[int] = tokenizer_r(_a , return_offsets_mapping=_a , 
add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , ) _A : List[str] = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) _A : str = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , ) _A : Dict = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) _A : List[str] = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_a ), len(_a ) + 1 + len(_a )) , ) _A : int = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) _A : List[Any] = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_a ), len(_a ) + 1 + len(_a )) , ) _A : Dict = F''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) _A : Optional[Any] = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) _A : Union[str, Any] = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_a )) ) self.assertEqual( 
encoding.offset_mapping[1] , (1 + len(_a ) + 1, 1 + len(_a ) + 1 + len(_a )) , ) _A : Optional[int] = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) _A : int = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_a ), 1 + len(_a ) + 1 + len(_a )) , ) _A : Optional[int] = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) _A : Optional[Any] = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_a ), 1 + len(_a ) + 1 + len(_a )) , )
26
import gc
import unittest

from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
        CTRLForSequenceClassification,
        CTRLLMHeadModel,
        CTRLModel,
    )


class CTRLModelTester:
    """Builds a tiny CTRL config plus matching dummy inputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # NOTE(review): the obfuscated original had every parameter named `_A`
        # (a SyntaxError) and dropped the `self.` on these assignments; names
        # below are recovered from how the attributes are read later in the class.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # CTRL uses the last vocab id as padding in these tests.
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        """Create a config and random tensors shaped for every model head under test."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        # Exercise the optional-argument paths as well as the bare forward.
        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict

    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))


@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass


@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
273
0
'''simple docstring''' import requests __lowercase : Tuple = '' # <-- Put your OpenWeatherMap appid here! __lowercase : Tuple = 'https://api.openweathermap.org/data/2.5/' def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "Chicago" , _SCREAMING_SNAKE_CASE : str = APPID ): return requests.get(URL_BASE + 'weather' , params=locals() ).json() def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "Kolkata, India" , _SCREAMING_SNAKE_CASE : str = APPID ): return requests.get(URL_BASE + 'forecast' , params=locals() ).json() def lowerCamelCase (_SCREAMING_SNAKE_CASE : float = 5_5.6_8 , _SCREAMING_SNAKE_CASE : float = 1_2.5_7 , _SCREAMING_SNAKE_CASE : str = APPID ): return requests.get(URL_BASE + 'onecall' , params=locals() ).json() if __name__ == "__main__": from pprint import pprint while True: __lowercase : Dict = input('Enter a location:').strip() if location: pprint(current_weather(location)) else: break
27
import cva import numpy as np class A_ : def __init__( self , _A , _A ): '''simple docstring''' if k in (0.04, 0.06): UpperCAmelCase = k UpperCAmelCase = window_size else: raise ValueError('''invalid k value''' ) def __str__( self ): '''simple docstring''' return str(self.k ) def _lowercase ( self , _A ): '''simple docstring''' UpperCAmelCase = cva.imread(_A , 0 ) UpperCAmelCase , UpperCAmelCase = img.shape UpperCAmelCase = [] UpperCAmelCase = img.copy() UpperCAmelCase = cva.cvtColor(_A , cva.COLOR_GRAY2RGB ) UpperCAmelCase , UpperCAmelCase = np.gradient(_A ) UpperCAmelCase = dx**2 UpperCAmelCase = dy**2 UpperCAmelCase = dx * dy UpperCAmelCase = 0.04 UpperCAmelCase = self.window_size // 2 for y in range(_A , h - offset ): for x in range(_A , w - offset ): UpperCAmelCase = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() UpperCAmelCase = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() UpperCAmelCase = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() UpperCAmelCase = (wxx * wyy) - (wxy**2) UpperCAmelCase = wxx + wyy UpperCAmelCase = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 2_5_5 ) return color_img, corner_list if __name__ == "__main__": __A : Tuple = HarrisCorner(0.04, 3) __A , __A : List[Any] = edge_detect.detect("path_to_image") cva.imwrite("detect.png", color_img)
273
0
import unittest

from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        NystromformerForMaskedLM,
        NystromformerForMultipleChoice,
        NystromformerForQuestionAnswering,
        NystromformerForSequenceClassification,
        NystromformerForTokenClassification,
        NystromformerModel,
    )
    from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


class NystromformerModelTester:
    """Builds a tiny Nystromformer config plus matching dummy inputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # NOTE(review): the obfuscated original had duplicate parameter names
        # (a SyntaxError) and dropped the `self.` on these assignments; names
        # are recovered from how the attributes are read later in the class.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create a config and random tensors shaped for every model head under test."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Repeat each example once per choice along a new dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
28
from datetime import datetime import requests def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> bytes: '''simple docstring''' UpperCAmelCase = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url=''' UpperCAmelCase = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src'''] return requests.get(UpperCamelCase__ ).content if __name__ == "__main__": __A : Union[str, Any] = input("Enter Video/IGTV url: ").strip() __A : Tuple = F'{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4' with open(file_name, "wb") as fp: fp.write(download_video(url)) print(F'Done. Video saved to disk as {file_name}.')
273
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __UpperCAmelCase = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'OPT_PRETRAINED_MODEL_ARCHIVE_LIST', 'OPTForCausalLM', 'OPTModel', 'OPTPreTrainedModel', 'OPTForSequenceClassification', 'OPTForQuestionAnswering', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'FlaxOPTForCausalLM', 'FlaxOPTModel', 'FlaxOPTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
29
from __future__ import annotations from collections.abc import Callable def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 100 , ) -> float: '''simple docstring''' UpperCAmelCase = x_start UpperCAmelCase = fnc(UpperCamelCase__ ) UpperCAmelCase = 0.0 for _ in range(UpperCamelCase__ ): # Approximates small segments of curve as linear and solve # for trapezoidal area UpperCAmelCase = (x_end - x_start) / steps + xa UpperCAmelCase = fnc(UpperCamelCase__ ) area += abs(fxa + fxa ) * (xa - xa) / 2 # Increment step UpperCAmelCase = xa UpperCAmelCase = fxa return area if __name__ == "__main__": def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> str: '''simple docstring''' return x**3 + x**2 print("f(x) = x^3 + x^2") print("The area between the curve, x = -5, x = 5 and the x axis is:") __A : List[Any] = 10 while i <= 100_000: print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}') i *= 10
273
0
import json
import os
from typing import Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

# CTRL "control code" prompts -> their token ids in the CTRL vocabulary.
CONTROL_CODES = {
    "Pregnancy": 168629,
    "Christianity": 7675,
    "Explain": 106423,
    "Fitness": 63440,
    "Saving": 63163,
    "Ask": 27171,
    "Ass": 95985,
    "Joke": 163509,
    "Questions": 45622,
    "Thoughts": 49605,
    "Retail": 52342,
    "Feminism": 164338,
    "Writing": 11992,
    "Atheism": 192263,
    "Netflix": 48616,
    "Computing": 39639,
    "Opinion": 43213,
    "Alone": 44967,
    "Funny": 58917,
    "Gaming": 40358,
    "Human": 4088,
    "India": 1331,
    "Joker": 77138,
    "Diet": 36206,
    "Legal": 11859,
    "Norman": 4939,
    "Tip": 72689,
    "Weight": 52343,
    "Movies": 46273,
    "Running": 23425,
    "Science": 2090,
    "Horror": 37793,
    "Confession": 60572,
    "Finance": 12250,
    "Politics": 16360,
    "Scary": 191985,
    "Support": 12654,
    "Technologies": 32516,
    "Teenage": 66160,
    "Event": 32769,
    "Learned": 67460,
    "Notion": 182770,
    "Wikipedia": 37583,
    "Books": 6665,
    "Extract": 76050,
    "Confessions": 102701,
    "Conspiracy": 75932,
    "Links": 63674,
    "Narcissus": 150425,
    "Relationship": 54766,
    "Relationships": 134796,
    "Reviews": 41671,
    "News": 4256,
    "Translation": 26820,
    "multilingual": 128406,
}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word* (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs


class CTRLTokenizer(PreTrainedTokenizer):
    """CTRL tokenizer: whitespace pre-tokenization followed by BPE with '@@ ' continuation markers."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # First line is a header, last line is empty after the split.
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair encoding to a single whitespace token, caching results."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        # Mark the end of the word so merges can distinguish word-final symbols.
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (earliest-learned) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        # Drop the trailing '</w>' marker.
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join sub-tokens back into a single string, removing '@@ ' continuations."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into *save_directory* and return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
30
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer __A : Dict = logging.get_logger(__name__) __A : Any = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __A : Tuple = { "vocab_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt" ), "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt", "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt" ), }, "tokenizer_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli": ( "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json" ), }, } __A : List[Any] = { "squeezebert/squeezebert-uncased": 512, "squeezebert/squeezebert-mnli": 512, "squeezebert/squeezebert-mnli-headless": 512, } __A : List[Any] = { "squeezebert/squeezebert-uncased": {"do_lower_case": True}, "squeezebert/squeezebert-mnli": {"do_lower_case": True}, "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True}, } class A_ (a_ ): UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = PRETRAINED_INIT_CONFIGURATION UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = SqueezeBertTokenizer def __init__( self , _A=None , _A=None , _A=True , _A="[UNK]" , _A="[SEP]" , _A="[PAD]" , _A="[CLS]" , _A="[MASK]" , _A=True , _A=None , **_A , ): '''simple docstring''' super().__init__( _A , tokenizer_file=_A , do_lower_case=_A , 
unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , ) UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , _A ) != do_lower_case or normalizer_state.get('''strip_accents''' , _A ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , _A ) != tokenize_chinese_chars ): UpperCAmelCase = getattr(_A , normalizer_state.pop('''type''' ) ) UpperCAmelCase = do_lower_case UpperCAmelCase = strip_accents UpperCAmelCase = tokenize_chinese_chars UpperCAmelCase = normalizer_class(**_A ) UpperCAmelCase = do_lower_case def _lowercase ( self , _A , _A=None ): '''simple docstring''' UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _lowercase ( self , _A , _A = None ): '''simple docstring''' UpperCAmelCase = [self.sep_token_id] UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowercase ( self , _A , _A = None ): '''simple docstring''' UpperCAmelCase = self._tokenizer.model.save(_A , name=_A ) return tuple(_A )
273
0
'''simple docstring''' import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class lowerCamelCase_ (snake_case__ ): '''simple docstring''' __UpperCamelCase: Optional[Any] = (UnCLIPScheduler,) def _A ( self : List[Any] , **A : Optional[int] ): _UpperCAmelCase : Dict = { "num_train_timesteps": 1000, "variance_type": "fixed_small_log", "clip_sample": True, "clip_sample_range": 1.0, "prediction_type": "epsilon", } config.update(**A ) return config def _A ( self : Any ): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=A ) def _A ( self : Any ): for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=A ) def _A ( self : Tuple ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=A ) def _A ( self : Optional[Any] ): for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=A ) def _A ( self : Any ): for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=A ) def _A ( self : Dict ): for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=A , prev_timestep=A ) def _A ( self : int ): _UpperCAmelCase : Tuple = self.scheduler_classes[0] _UpperCAmelCase : Union[str, Any] = self.get_scheduler_config(variance_type="fixed_small_log" ) _UpperCAmelCase : Any = scheduler_class(**A ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00E-10 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1E-5 def _A ( self : int ): _UpperCAmelCase : Optional[Any] = self.scheduler_classes[0] _UpperCAmelCase : Tuple = self.get_scheduler_config(variance_type="learned_range" ) _UpperCAmelCase : int = scheduler_class(**A ) _UpperCAmelCase : int = 0.5 assert 
scheduler._get_variance(1 , predicted_variance=A ) - -10.1_712_790 < 1E-5 assert scheduler._get_variance(487 , predicted_variance=A ) - -5.7_998_052 < 1E-5 assert scheduler._get_variance(999 , predicted_variance=A ) - -0.0_010_011 < 1E-5 def _A ( self : Optional[int] ): _UpperCAmelCase : str = self.scheduler_classes[0] _UpperCAmelCase : Union[str, Any] = self.get_scheduler_config() _UpperCAmelCase : Union[str, Any] = scheduler_class(**A ) _UpperCAmelCase : Union[str, Any] = scheduler.timesteps _UpperCAmelCase : int = self.dummy_model() _UpperCAmelCase : Optional[int] = self.dummy_sample_deter _UpperCAmelCase : Tuple = torch.manual_seed(0 ) for i, t in enumerate(A ): # 1. predict noise residual _UpperCAmelCase : Dict = model(A , A ) # 2. predict previous mean of sample x_t-1 _UpperCAmelCase : List[str] = scheduler.step(A , A , A , generator=A ).prev_sample _UpperCAmelCase : str = pred_prev_sample _UpperCAmelCase : Union[str, Any] = torch.sum(torch.abs(A ) ) _UpperCAmelCase : List[str] = torch.mean(torch.abs(A ) ) assert abs(result_sum.item() - 252.2_682_495 ) < 1E-2 assert abs(result_mean.item() - 0.3_284_743 ) < 1E-3 def _A ( self : List[str] ): _UpperCAmelCase : List[Any] = self.scheduler_classes[0] _UpperCAmelCase : Dict = self.get_scheduler_config() _UpperCAmelCase : Dict = scheduler_class(**A ) scheduler.set_timesteps(25 ) _UpperCAmelCase : List[str] = scheduler.timesteps _UpperCAmelCase : Union[str, Any] = self.dummy_model() _UpperCAmelCase : List[str] = self.dummy_sample_deter _UpperCAmelCase : Tuple = torch.manual_seed(0 ) for i, t in enumerate(A ): # 1. predict noise residual _UpperCAmelCase : Any = model(A , A ) if i + 1 == timesteps.shape[0]: _UpperCAmelCase : List[Any] = None else: _UpperCAmelCase : Any = timesteps[i + 1] # 2. 
predict previous mean of sample x_t-1 _UpperCAmelCase : Union[str, Any] = scheduler.step( A , A , A , prev_timestep=A , generator=A ).prev_sample _UpperCAmelCase : List[Any] = pred_prev_sample _UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(A ) ) _UpperCAmelCase : int = torch.mean(torch.abs(A ) ) assert abs(result_sum.item() - 258.2_044_983 ) < 1E-2 assert abs(result_mean.item() - 0.3_362_038 ) < 1E-3 def _A ( self : Dict ): pass def _A ( self : int ): pass
31
import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument __A : int = { "/attention/": "/0/SelfAttention/", "/self_attention/": "/0/SelfAttention/", "/encoder_decoder_attention/": "/1/EncDecAttention/", "value": "v", "query": "q", "key": "k", "out": "o", "pre_self_attention_layer_norm": "0/layer_norm", "pre_cross_attention_layer_norm": "1/layer_norm", "pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong "token_embedder": "shared", "encoder_norm": "final_layer_norm", "decoder_norm": "final_layer_norm", "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight", "router/router_weights/w/": "router/classifier/", "roer/roer_weights/w/": "router/classifier/", "logits_dense": "lm_head", } def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> List[str]: '''simple docstring''' UpperCAmelCase = list(s_dict.keys() ) for key in keys: UpperCAmelCase = R'''.*/layers_(\d+)''' UpperCAmelCase = key if re.match(UpperCamelCase__ , UpperCamelCase__ ): UpperCAmelCase = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , UpperCamelCase__ ) UpperCAmelCase = R'''(encoder|decoder)\/''' if re.match(UpperCamelCase__ , UpperCamelCase__ ): UpperCAmelCase = re.match(UpperCamelCase__ , UpperCamelCase__ ).groups() if groups[0] == "encoder": UpperCAmelCase = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , UpperCamelCase__ ) UpperCAmelCase = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , UpperCamelCase__ ) elif groups[0] == "decoder": UpperCAmelCase = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , UpperCamelCase__ ) UpperCAmelCase = re.sub(R'''/pre_mlp_layer_norm/''' , 
R'''/2/layer_norm/''' , UpperCamelCase__ ) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: UpperCAmelCase = new_key.replace(UpperCamelCase__ , UpperCamelCase__ ) print(F"""{key} -> {new_key}""" ) UpperCAmelCase = s_dict.pop(UpperCamelCase__ ) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: UpperCAmelCase = s_dict[ '''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight''' ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: UpperCAmelCase = s_dict[ '''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight''' ].T # 3. Take extra care of the EXPERTS layer for key in list(s_dict.keys() ): if "expert" in key: UpperCAmelCase = s_dict[key].shape[0] UpperCAmelCase = s_dict[key] for idx in range(UpperCamelCase__ ): UpperCAmelCase = expert_weihts[idx] print(F"""{key} -> {key.replace("expert/" , "nested fstring" )}""" ) s_dict.pop(UpperCamelCase__ ) return s_dict __A : Optional[int] = { "NUM_ENCODER_LAYERS": "num_layers", "NUM_DECODER_LAYERS": "num_decoder_layers", "NUM_HEADS": "num_heads", "HEAD_DIM": "d_kv", "EMBED_DIM": "d_model", "MLP_DIM": "d_ff", "NUM_SELECTED_EXPERTS": "num_selected_experts", "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers", "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers", "dense.MlpBlock.activations": "feed_forward_proj", } def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> List[str]: '''simple docstring''' import regex as re with open(UpperCamelCase__ , '''r''' ) as f: UpperCAmelCase = f.read() UpperCAmelCase = re.findall(R'''(.*) = ([0-9.]*)''' , UpperCamelCase__ ) UpperCAmelCase = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": UpperCAmelCase = float(UpperCamelCase__ ) if '''.''' in value else int(UpperCamelCase__ ) UpperCAmelCase = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , UpperCamelCase__ )[0] 
UpperCAmelCase = str(activation[1] ) UpperCAmelCase = num_experts UpperCAmelCase = SwitchTransformersConfig(**UpperCamelCase__ ) return config def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__="./" , UpperCamelCase__=8 ) -> List[Any]: '''simple docstring''' print(F"""Loading flax weights from : {flax_checkpoint_path}""" ) UpperCAmelCase = checkpoints.load_tax_checkpoint(UpperCamelCase__ ) if gin_file is not None: UpperCAmelCase = convert_gin_to_config(UpperCamelCase__ , UpperCamelCase__ ) else: UpperCAmelCase = SwitchTransformersConfig.from_pretrained(UpperCamelCase__ ) UpperCAmelCase = SwitchTransformersForConditionalGeneration(UpperCamelCase__ ) UpperCAmelCase = flax_params['''target'''] UpperCAmelCase = flatten_dict(UpperCamelCase__ , sep='''/''' ) UpperCAmelCase = rename_keys(UpperCamelCase__ ) UpperCAmelCase = unflatten_dict(UpperCamelCase__ , sep='''/''' ) # Load the flax params in the PT model load_flax_weights_in_pytorch_model(UpperCamelCase__ , UpperCamelCase__ ) print(F"""Save PyTorch model to {pytorch_dump_path}""" ) pt_model.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": __A : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--switch_t5x_checkpoint_path", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the" " model architecture. If not provided, a `gin_file` has to be provided." ), ) parser.add_argument( "--gin_file", default=None, type=str, required=False, help="Path to the gin config file. If not provided, a `config_file` has to be passed ", ) parser.add_argument( "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model." 
) parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts") __A : Tuple = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_tax_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
273
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) UpperCAmelCase_ : List[str] = { 'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Union[str, Any] = ['MobileViTFeatureExtractor'] UpperCAmelCase_ : int = ['MobileViTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Optional[Any] = [ 'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'MobileViTForImageClassification', 'MobileViTForSemanticSegmentation', 'MobileViTModel', 'MobileViTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : int = [ 'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFMobileViTForImageClassification', 'TFMobileViTForSemanticSegmentation', 'TFMobileViTModel', 'TFMobileViTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: 
pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys UpperCAmelCase_ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
32
import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class A_ : def _lowercase ( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) UpperCAmelCase = UNetaDConditionModel( sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) UpperCAmelCase = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , ) torch.manual_seed(0 ) UpperCAmelCase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def _lowercase ( self ): '''simple docstring''' torch.manual_seed(0 ) 
UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) UpperCAmelCase = UNetaDConditionModel( sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='''gelu''' , time_embedding_dim=3_2 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) UpperCAmelCase = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , ) torch.manual_seed(0 ) UpperCAmelCase = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , ) torch.manual_seed(0 ) UpperCAmelCase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.get_dummy_components() UpperCAmelCase = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) 
UpperCAmelCase = self.get_dummy_inputs(_A ) UpperCAmelCase = inputs['''prompt'''] UpperCAmelCase = inputs['''generator'''] UpperCAmelCase = inputs['''num_inference_steps'''] UpperCAmelCase = inputs['''output_type'''] if "image" in inputs: UpperCAmelCase = inputs['''image'''] else: UpperCAmelCase = None if "mask_image" in inputs: UpperCAmelCase = inputs['''mask_image'''] else: UpperCAmelCase = None if "original_image" in inputs: UpperCAmelCase = inputs['''original_image'''] else: UpperCAmelCase = None UpperCAmelCase , UpperCAmelCase = pipe.encode_prompt(_A ) # inputs with prompt converted to embeddings UpperCAmelCase = { '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''': num_inference_steps, '''output_type''': output_type, } if image is not None: UpperCAmelCase = image if mask_image is not None: UpperCAmelCase = mask_image if original_image is not None: UpperCAmelCase = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(_A , _A , _A ) UpperCAmelCase = pipe(**_A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_A ) UpperCAmelCase = self.pipeline_class.from_pretrained(_A ) pipe_loaded.to(_A ) pipe_loaded.set_progress_bar_config(disable=_A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(_A , _A ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , ) UpperCAmelCase = self.get_dummy_inputs(_A ) UpperCAmelCase = inputs['''generator'''] UpperCAmelCase = inputs['''num_inference_steps'''] UpperCAmelCase = inputs['''output_type'''] # inputs with prompt converted to embeddings UpperCAmelCase = { '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''': 
num_inference_steps, '''output_type''': output_type, } if image is not None: UpperCAmelCase = image if mask_image is not None: UpperCAmelCase = mask_image if original_image is not None: UpperCAmelCase = original_image UpperCAmelCase = pipe_loaded(**_A )[0] UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max() self.assertLess(_A , 1E-4 ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.get_dummy_components() UpperCAmelCase = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase = self.get_dummy_inputs(_A ) UpperCAmelCase = pipe(**_A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_A ) UpperCAmelCase = self.pipeline_class.from_pretrained(_A ) pipe_loaded.to(_A ) pipe_loaded.set_progress_bar_config(disable=_A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests UpperCAmelCase = self.get_dummy_inputs(_A ) UpperCAmelCase = pipe_loaded(**_A )[0] UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max() self.assertLess(_A , 1E-4 )
273
0
"""simple docstring""" from transformers import BertTokenizerFast from .custom_tokenization import CustomTokenizer class _UpperCAmelCase ( _A ): SCREAMING_SNAKE_CASE_ : Optional[int] = CustomTokenizer pass
33
from __future__ import annotations from collections import namedtuple def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> tuple: '''simple docstring''' UpperCAmelCase = namedtuple('''result''' , '''name value''' ) if (voltage, current, power).count(0 ) != 1: raise ValueError('''Only one argument must be 0''' ) elif power < 0: raise ValueError( '''Power cannot be negative in any electrical/electronics system''' ) elif voltage == 0: return result('''voltage''' , power / current ) elif current == 0: return result('''current''' , power / voltage ) elif power == 0: return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
273
0
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin A =get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class _a ( __a , unittest.TestCase ): __a : int = XGLMTokenizer __a : Any = XGLMTokenizerFast __a : Any = True __a : Tuple = True def A ( self : Optional[int] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase = XGLMTokenizer(lowercase , keep_accents=lowercase ) tokenizer.save_pretrained(self.tmpdirname ) def A ( self : Any ): '''simple docstring''' UpperCAmelCase = '''<pad>''' UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase ) def A ( self : str ): '''simple docstring''' UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(len(lowercase ) , 1_008 ) def A ( self : str ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_008 ) def A ( self : List[str] ): '''simple docstring''' UpperCAmelCase = XGLMTokenizer(lowercase , keep_accents=lowercase ) UpperCAmelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowercase , [ SPIECE_UNDERLINE + '''I''', 
SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) UpperCAmelCase = tokenizer.convert_tokens_to_ids(lowercase ) self.assertListEqual( lowercase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) UpperCAmelCase = tokenizer.convert_ids_to_tokens(lowercase ) self.assertListEqual( lowercase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def A ( self : Any ): '''simple docstring''' return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' ) def A ( self : str ): '''simple docstring''' with tempfile.NamedTemporaryFile() as f: shutil.copyfile(lowercase , f.name ) UpperCAmelCase = XGLMTokenizer(f.name , keep_accents=lowercase ) UpperCAmelCase = pickle.dumps(lowercase ) pickle.loads(lowercase ) def A ( self : List[str] ): '''simple docstring''' if not self.test_rust_tokenizer: return UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = self.get_rust_tokenizer() UpperCAmelCase = '''I was born in 92000, and this is falsé.''' UpperCAmelCase = tokenizer.tokenize(lowercase ) UpperCAmelCase = rust_tokenizer.tokenize(lowercase ) self.assertListEqual(lowercase , lowercase ) UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase ) UpperCAmelCase = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase ) 
self.assertListEqual(lowercase , lowercase ) UpperCAmelCase = self.get_rust_tokenizer() UpperCAmelCase = tokenizer.encode(lowercase ) UpperCAmelCase = rust_tokenizer.encode(lowercase ) self.assertListEqual(lowercase , lowercase ) @slow def A ( self : List[Any] ): '''simple docstring''' UpperCAmelCase = '''Hello World!''' UpperCAmelCase = [2, 31_227, 4_447, 35] self.assertListEqual(lowercase , self.big_tokenizer.encode(lowercase ) ) @slow def A ( self : int ): '''simple docstring''' UpperCAmelCase = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth''' ) # fmt: off UpperCAmelCase = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735] # fmt: on self.assertListEqual(lowercase , self.big_tokenizer.encode(lowercase ) ) @slow def A ( self : List[str] ): '''simple docstring''' UpperCAmelCase = { '''input_ids''': [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 
7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase , model_name='''facebook/xglm-564M''' , padding=lowercase , )
34
"""Lazy-import init module for the RoFormer model family.

Builds ``_import_structure`` (submodule name -> exported public names),
guarded by optional-dependency checks, and installs a ``_LazyModule`` so
heavy backends (torch/tf/flax) are imported only on first attribute access.

Fixes: every structure entry was previously assigned to a throwaway name
(``__A``), so ``_import_structure`` was undefined at the ``_LazyModule``
call and the lazy proxy was never installed into ``sys.modules``.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Always-importable submodules and their public names.
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

# Each optional backend contributes its exports only when installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with the lazy proxy so attribute access
    # triggers on-demand submodule imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
273
0
"""Tests for the GPT-SAN Japanese tokenizer.

NOTE(review): this file is obfuscation-damaged — distinct local variables were
collapsed into the placeholders ``snake_case__`` / ``snake_case_`` and every
method is named ``lowerCamelCase``, so several statements reference names that
are never bound (e.g. ``vocab_tokens``, ``kwargs``, ``x_token_a``). The code is
kept byte-identical here; comments flag the broken spots for reconstruction.
"""

import json
import os
import unittest

from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
    VOCAB_FILES_NAMES,
    GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class UpperCAmelCase_ ( _a , unittest.TestCase ):
    """Tokenizer test suite driven by the shared TokenizerTesterMixin harness.

    NOTE(review): base ``_a`` is undefined in this file — presumably the
    obfuscated ``TokenizerTesterMixin`` import above; confirm and restore.
    """

    # NOTE(review): three class attributes were all renamed to ``lowercase``, so
    # the later assignments shadow the earlier ones. They presumably were
    # ``tokenizer_class``, ``test_rust_tokenizer`` and
    # ``from_pretrained_kwargs`` — TODO confirm against the mixin's contract.
    lowercase = GPTSanJapaneseTokenizer
    lowercase = False
    lowercase = {"do_clean_text": False, "add_prefix_space": False}

    def lowerCamelCase ( self : str ):
        # setUp: write a tiny vocab + emoji map into the test tmp dir.
        super().setUp()
        # fmt: off
        snake_case__ : Optional[Any] = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
        # fmt: on
        snake_case__ : int = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}}  # 😀
        snake_case__ : List[Any] = {"""unk_token""": """<unk>"""}
        snake_case__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        snake_case__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
        # NOTE(review): ``vocab_tokens``, ``self.vocab_file``, ``self.emoji_file``
        # are read below but only the placeholder names were assigned above —
        # obfuscation broke the bindings; restore the original variable names.
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        with open(self.emoji_file , """w""" ) as emoji_writer:
            emoji_writer.write(json.dumps(snake_case_ ) )

    def lowerCamelCase ( self : Any , **snake_case_ : Union[str, Any] ):
        # Factory returning a tokenizer loaded from the tmp dir fixtures.
        # NOTE(review): ``kwargs`` is unbound — the parameter was presumably
        # named ``kwargs`` before obfuscation.
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )

    def lowerCamelCase ( self : Any , snake_case_ : str ):
        # Returns an (input, expected-decoded-output) text pair; 㔺 normalizes to 世.
        snake_case__ : Union[str, Any] = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
        snake_case__ : List[str] = """こんにちは、世界。 \nこんばんは、世界。😀"""
        # NOTE(review): ``input_text``/``output_text`` are unbound placeholders.
        return input_text, output_text

    def lowerCamelCase ( self : Any , snake_case_ : Dict ):
        # Encode/decode round-trip helper for the mixin.
        # NOTE(review): annotated tuple unpacking (``a , b : int = ...``) is a
        # SyntaxError in Python — artifact of the obfuscation; the annotation
        # must be dropped when this line is restored.
        snake_case__ , snake_case__ : int = self.get_input_output_texts(snake_case_ )
        snake_case__ : int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
        snake_case__ : List[str] = tokenizer.decode(snake_case_ , clean_up_tokenization_spaces=snake_case_ )
        return text, ids

    def lowerCamelCase ( self : Optional[Any] ):
        pass  # TODO add if relevant

    def lowerCamelCase ( self : Union[str, Any] ):
        pass  # TODO add if relevant

    def lowerCamelCase ( self : List[str] ):
        pass  # TODO add if relevant

    def lowerCamelCase ( self : Dict ):
        # Full tokenizer check: tokenize, then ids without/with special tokens.
        snake_case__ : Optional[Any] = self.get_tokenizer()
        # Testing tokenization
        snake_case__ : int = """こんにちは、世界。 こんばんは、㔺界。"""
        snake_case__ : Optional[int] = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
        snake_case__ : Dict = tokenizer.tokenize(snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )
        # Testing conversion to ids without special tokens
        snake_case__ : Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        snake_case__ : List[Any] = tokenizer.convert_tokens_to_ids(snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )
        # Testing conversion to ids with special tokens
        snake_case__ : Union[str, Any] = tokens + [tokenizer.unk_token]
        snake_case__ : Dict = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        snake_case__ : Any = tokenizer.convert_tokens_to_ids(snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )

    def lowerCamelCase ( self : Optional[Any] ):
        # <|bagoftoken|> expands to repeated preceding-token behavior on decode.
        snake_case__ : Union[str, Any] = self.get_tokenizer()
        # Testing tokenization
        snake_case__ : Union[str, Any] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
        snake_case__ : Optional[int] = """こんにちは、、、、世界。こんばんは、、、、世界。"""
        snake_case__ : Any = tokenizer.encode(snake_case_ )
        snake_case__ : int = tokenizer.decode(snake_case_ )
        self.assertEqual(snake_case_ , snake_case_ )

    @slow
    def lowerCamelCase ( self : Union[str, Any] ):
        # Prefix-text handling: three encode variants must decode identically.
        snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization
        snake_case__ : Tuple = """こんにちは、世界。"""
        snake_case__ : Optional[Any] = """こんばんは、㔺界。😀"""
        snake_case__ : List[str] = """こんにちは、世界。こんばんは、世界。😀"""
        # NOTE(review): ``prefix_text``/``input_text``/``tokenizer`` below are
        # unbound placeholders from obfuscation.
        snake_case__ : Dict = tokenizer.encode(prefix_text + input_text )
        snake_case__ : Dict = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
        snake_case__ : int = tokenizer.encode(snake_case_ , prefix_text=snake_case_ )
        snake_case__ : Optional[Any] = tokenizer.decode(snake_case_ )
        snake_case__ : Union[str, Any] = tokenizer.decode(snake_case_ )
        snake_case__ : str = tokenizer.decode(snake_case_ )
        self.assertEqual(snake_case_ , snake_case_ )
        self.assertEqual(snake_case_ , snake_case_ )
        self.assertEqual(snake_case_ , snake_case_ )

    @slow
    def lowerCamelCase ( self : Union[str, Any] ):
        # token_type_ids must mark the prefix segment consistently across the
        # three equivalent ways of passing prefix + text.
        snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization
        snake_case__ : Dict = """こんにちは、世界。"""
        snake_case__ : Optional[int] = """こんばんは、㔺界。😀"""
        snake_case__ : Any = len(tokenizer.encode(snake_case_ ) ) - 2
        snake_case__ : Optional[int] = len(tokenizer.encode(snake_case_ ) ) - 2
        snake_case__ : List[str] = [1] + [0] * (len_prefix + len_text + 1)
        snake_case__ : Optional[int] = [1] * (len_prefix + len_text + 1) + [0]
        snake_case__ : int = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        snake_case__ : Any = tokenizer(prefix_text + input_text ).token_type_ids
        snake_case__ : str = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
        snake_case__ : Optional[Any] = tokenizer(snake_case_ , prefix_text=snake_case_ ).token_type_ids
        self.assertListEqual(snake_case_ , snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )

    @slow
    def lowerCamelCase ( self : Optional[int] ):
        # SEG-token placement: prefix variants decode equally but differ in ids.
        snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        snake_case__ : Union[str, Any] = tokenizer.encode("""あンいワ""" )
        snake_case__ : int = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
        snake_case__ : Dict = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
        self.assertEqual(tokenizer.decode(snake_case_ ) , tokenizer.decode(snake_case_ ) )
        self.assertEqual(tokenizer.decode(snake_case_ ) , tokenizer.decode(snake_case_ ) )
        self.assertNotEqual(snake_case_ , snake_case_ )
        self.assertNotEqual(snake_case_ , snake_case_ )
        # NOTE(review): ``x_token_a`` is unbound; the originals were presumably
        # distinct ``x_token_1``/``x_token_2``/``x_token_3`` encodings.
        self.assertEqual(x_token_a[1] , x_token_a[-1] )  # SEG token
        self.assertEqual(x_token_a[1] , x_token_a[3] )  # SEG token

    @slow
    def lowerCamelCase ( self : Any ):
        # Batch encoding: __call__ and batch_encode_plus must agree on ids,
        # token_type_ids and attention_mask under padding.
        snake_case__ : Optional[int] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        snake_case__ : int = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
        snake_case__ : Optional[Any] = tokenizer(snake_case_ , padding=snake_case_ )
        snake_case__ : Tuple = tokenizer.batch_encode_plus(snake_case_ , padding=snake_case_ )
        # fmt: off
        snake_case__ : Optional[Any] = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
        snake_case__ : Optional[Any] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        snake_case__ : Optional[Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        # NOTE(review): ``x_token`` / ``x_token_a`` are unbound placeholders.
        self.assertListEqual(x_token.input_ids , snake_case_ )
        self.assertListEqual(x_token.token_type_ids , snake_case_ )
        self.assertListEqual(x_token.attention_mask , snake_case_ )
        self.assertListEqual(x_token_a.input_ids , snake_case_ )
        self.assertListEqual(x_token_a.token_type_ids , snake_case_ )
        self.assertListEqual(x_token_a.attention_mask , snake_case_ )

    def lowerCamelCase ( self : Any ):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def lowerCamelCase ( self : List[str] ):
        # tokenizer has no padding token
        pass
35
"""Convert an original YOSO checkpoint into HuggingFace `YosoForMaskedLM` format.

Fixes: all three functions were renamed to the same placeholder
``__SCREAMING_SNAKE_CASE`` while the call sites still referenced
``convert_checkpoint_helper`` and ``convert_yoso_checkpoint`` (NameError),
and the renamed key was never written back into the state dict.
"""
import argparse

import torch

from transformers import YosoConfig, YosoForMaskedLM


def rename_key(orig_key):
    """Map a key name from the original YOSO checkpoint layout to the HF layout.

    Order matters: more specific substrings (norm1/norm2, mha.attn, ff1/ff2,
    mlm_class) are handled before their shorter prefixes (norm, mha, ff, mlm).
    """
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """Rename every key in `orig_state_dict` in place and add derived tensors.

    Pooler / sentence-classification weights are dropped (not used by
    `YosoForMaskedLM`); the decoder bias is mirrored to `cls.predictions.bias`
    and fresh position ids (offset by 2, as in the original model) are added.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    """Load the original checkpoint, convert it, and save an HF model dir."""
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    # load_state_dict's result is printed so missing/unexpected keys are visible.
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
273
0
from math import ceil def A ( _lowerCamelCase = 1_001 ): '''simple docstring''' _lowerCAmelCase : int = 1 for i in range(1 , int(ceil(n / 2.0 ) ) ): _lowerCAmelCase : List[Any] = 2 * i + 1 _lowerCAmelCase : str = 2 * i _lowerCAmelCase : List[str] = total + 4 * odd**2 - 6 * even return total if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution()) else: try: _snake_case = int(sys.argv[1]) print(solution(n)) except ValueError: print("Invalid entry - please enter a number")
36
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int: '''simple docstring''' if exponent == 1: return base if exponent % 2 == 0: UpperCAmelCase = _modexpt(UpperCamelCase__ , exponent // 2 , UpperCamelCase__ ) % modulo_value return (x * x) % modulo_value else: return (base * _modexpt(UpperCamelCase__ , exponent - 1 , UpperCamelCase__ )) % modulo_value def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ = 1777 , UpperCamelCase__ = 1855 , UpperCamelCase__ = 8 ) -> int: '''simple docstring''' UpperCAmelCase = base for _ in range(1 , UpperCamelCase__ ): UpperCAmelCase = _modexpt(UpperCamelCase__ , UpperCamelCase__ , 10**digits ) return result if __name__ == "__main__": print(F'{solution() = }')
273
0
'''simple docstring''' # # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
# #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def _SCREAMING_SNAKE_CASE ( *UpperCamelCase ): """simple docstring""" with open(UpperCamelCase , """r""" ) as fh: fcntl.flock(UpperCamelCase , fcntl.LOCK_EX ) try: print(*UpperCamelCase ) finally: fcntl.flock(UpperCamelCase , fcntl.LOCK_UN ) _lowerCAmelCase = int(os.environ['''LOCAL_RANK''']) torch.cuda.set_device(local_rank) _lowerCAmelCase = torch.device('''cuda''', local_rank) _lowerCAmelCase = socket.gethostname() _lowerCAmelCase = F"""[{hostname}-{local_rank}]""" try: # test distributed dist.init_process_group('''nccl''') dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank _lowerCAmelCase = dist.get_rank() _lowerCAmelCase = dist.get_world_size() printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""") dist.barrier() if rank == 0: printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""") except Exception: printflock(F"""{gpu} is broken""") raise
37
"""Longformer model configuration and ONNX export configuration.

Fixes: both classes were named ``A_`` (the second shadowing the first) and
derived from an undefined base ``a_`` (NameError at import); several
assignment targets inside the ONNX config had been clobbered into throwaway
names, discarding their results.
"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging

if TYPE_CHECKING:
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase

logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}


class LongformerConfig(PretrainedConfig):
    """Configuration class holding the hyperparameters of a Longformer model.

    `attention_window` may be a single int (shared by all layers) or a
    per-layer list. Defaults mirror a RoBERTa-base-sized encoder.
    """

    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30_522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        """Constructs LongformerConfig; `pad_token_id` is forwarded to the base."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for Longformer (adds global_attention_mask)."""

    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        # Switch the model into ONNX-export-friendly code paths.
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            # Longformer's base model also exposes a pooled output.
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs at least opset 14 for the tril operator used by Longformer
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
273
0
from math import isqrt def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int ) -> bool: """simple docstring""" return all(number % divisor != 0 for divisor in range(2 , isqrt(__magic_name__ ) + 1 ) ) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int = 10**6 ) -> int: """simple docstring""" UpperCamelCase :Optional[int] = 0 UpperCamelCase :str = 1 UpperCamelCase :Optional[int] = 7 while prime_candidate < max_prime: primes_count += is_prime(__magic_name__ ) cube_index += 1 prime_candidate += 6 * cube_index return primes_count if __name__ == "__main__": print(F'''{solution() = }''')
38
"""Unconditional image-generation pipeline using the stochastic sampler of
Karras et al. (2022), "Elucidating the Design Space of Diffusion-Based
Generative Models" [1].

Fixes: the class was named ``A_`` with an undefined base ``a_`` (NameError at
import); the ``UNet2DModel`` import had been mangled to ``UNetaDModel``; and
the scheduler step results were bound to throwaway names, losing them.
"""
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class KarrasVePipeline(DiffusionPipeline):
    """Pipeline pairing a `UNet2DModel` denoiser with a `KarrasVeScheduler`."""

    # components registered via register_modules in __init__
    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """Run the sampling loop and return generated images.

        Returns an `ImagePipelineOutput` (or a plain tuple when
        `return_dict=False`); images are PIL when `output_type == "pil"`,
        otherwise a numpy array in [0, 1] with channels last.
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        # Map from [-1, 1] model space to [0, 1] image space.
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
273
0
from sympy import diff, lambdify, symbols from sympy.functions import * # noqa: F403 def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = "x" , __lowerCAmelCase = 10**-10 , __lowerCAmelCase = 1 , )-> complex: """simple docstring""" _UpperCAmelCase = symbols(__lowerCAmelCase ) _UpperCAmelCase = lambdify(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = lambdify(__lowerCAmelCase , diff(__lowerCAmelCase , __lowerCAmelCase ) ) _UpperCAmelCase = starting_point while True: if diff_function(__lowerCAmelCase ) != 0: _UpperCAmelCase = prev_guess - multiplicity * func(__lowerCAmelCase ) / diff_function( __lowerCAmelCase ) else: raise ZeroDivisionError('Could not find root' ) from None # Precision is checked by comparing the difference of consecutive guesses if abs(next_guess - prev_guess ) < precision: return next_guess _UpperCAmelCase = next_guess # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''') # Find root of polynomial # Find fourth Root of 5 print(F'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}''') # Find value of e print( '''The root of log(y) - 1 = 0 is ''', F'''{newton_raphson('log(y) - 1', 2, variable='y')}''', ) # Exponential Roots print( '''The root of exp(x) - 1 = 0 is''', F'''{newton_raphson('exp(x) - 1', 10, precision=0.0_05)}''', ) # Find root of cos(x) print(F'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''')
39
import itertools
import random
import unittest

import numpy as np

from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch

# Shared RNG so synthetic test data is reproducible within one process.
# (Original bound this to a throwaway name `__A` while floats_list read
# `global_rng` — a NameError.)
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float-valued nested list of the given (rows, cols) shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
    """Builds feature-extractor kwargs and synthetic audio/spectrogram inputs."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # step between consecutive sequence lengths so the batch spans the range
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        """Kwargs used to instantiate the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Waveform-style inputs: one 1-D sequence per batch element."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        """Spectrogram-style target inputs: one (frames, num_mel_bins) array per element."""
        if equal_length:
            speech_inputs = [
                floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)
            ]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    # Original subclassed an undefined `a_`; the mixin import at the top of the
    # file is the intended base.  All test methods were also named `_lowercase`,
    # so every definition but the last was silently discarded — restored to
    # distinct, discoverable test_* names.
    feature_extraction_class = SpeechTaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        """Assert per-column zero mean / unit variance (post-normalization)."""
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            # NOTE(review): original checks row 0 again here (not row 1) — kept as-is.
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # float64 input must come back as float32 — TODO confirm against the
        # upstream extractor; the obfuscated source only shows `np.floataa`.
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def test_call_target(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(
            audio_target=speech_inputs, padding=True, return_tensors="np"
        ).input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(
            all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name]))
        )

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]
        self.assertTrue(
            abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2
        )

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        # assertEqual (assertEquals is a deprecated alias)
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719,
             -3.6777, -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921,
             -3.0279, -3.0386, -3.0864, -3.1291, -3.2353, -2.7444, -2.6831,
             -2.7287, -3.1761, -3.1571, -3.2726, -3.0582, -3.1007, -3.4533,
             -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
273
0
"""simple docstring""" import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model __lowercase = """0.12""" # assumed parallelism: 8 if is_torch_available(): import torch def lowercase ( A_ , A_ , A_=None )-> List[Any]: '''simple docstring''' if rng is None: a : Union[str, Any] = random.Random() a : Tuple = 1 for dim in shape: total_dims *= dim a : Optional[Any] = [] for _ in range(A_ ): values.append(rng.randint(0 , vocab_size - 1 ) ) a : Dict = np.array(A_ , dtype=jnp.intaa ).reshape(A_ ) return output def lowercase ( A_ , A_=None )-> List[str]: '''simple docstring''' a : Optional[int] = ids_tensor(A_ , vocab_size=2 , rng=A_ ) # make sure that at least one token is attended to for each batch a : Tuple = 1 return attn_mask @require_flax class _A : """simple docstring""" UpperCAmelCase : Dict = None UpperCAmelCase : Dict = () def __snake_case ( self : Optional[int]): a , a : int = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 a : Dict = 2 a : Tuple = inputs["input_ids"].shape[-1] // 2 a : List[Any] = inputs["input_ids"][:max_batch_size, :sequence_length] a : str = jnp.ones_like(__UpperCAmelCase) a : str = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens a : Any = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` a : int = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def __snake_case ( self : Optional[Any]): a , a , a , a : Optional[int] = 
self._get_input_ids_and_config() a : Union[str, Any] = False a : str = max_length a : Dict = 0 for model_class in self.all_generative_model_classes: a : Union[str, Any] = model_class(__UpperCAmelCase) a : Optional[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning a : Optional[Any] = getattr(__UpperCAmelCase , __UpperCAmelCase) a : int = pt_model_class(__UpperCAmelCase).eval() a : str = load_flax_weights_in_pytorch_model(__UpperCAmelCase , flax_model.params) a : Tuple = flax_model.generate(__UpperCAmelCase).sequences a : Tuple = pt_model.generate(torch.tensor(__UpperCAmelCase , dtype=torch.long)) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: a : Tuple = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist()) def __snake_case ( self : Dict): a , a , a , a : Dict = self._get_input_ids_and_config() a : Optional[Any] = False a : Dict = max_length for model_class in self.all_generative_model_classes: a : Optional[int] = model_class(__UpperCAmelCase) a : List[str] = model.generate(__UpperCAmelCase).sequences self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase) a : str = jit(model.generate) a : List[str] = jit_generate(__UpperCAmelCase).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) def __snake_case ( self : Optional[int]): a , a , a , a : Dict = self._get_input_ids_and_config() a : Union[str, Any] = True a : List[Any] = max_length for model_class in self.all_generative_model_classes: a : Dict = model_class(__UpperCAmelCase) a : str = model.generate(__UpperCAmelCase).sequences self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase) a : Optional[int] = jit(model.generate) a : Tuple = jit_generate(__UpperCAmelCase).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) def __snake_case ( self : Union[str, Any]): a , a , a , a 
: List[Any] = self._get_input_ids_and_config() a : List[str] = False a : str = max_length a : List[Any] = 2 for model_class in self.all_generative_model_classes: a : Tuple = model_class(__UpperCAmelCase) a : Union[str, Any] = model.generate(__UpperCAmelCase).sequences self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase) a : List[Any] = jit(model.generate) a : Optional[Any] = jit_generate(__UpperCAmelCase).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) def __snake_case ( self : int): a , a , a , a : Any = self._get_input_ids_and_config() a : int = False a : str = max_length a : str = 2 a : Optional[int] = 2 for model_class in self.all_generative_model_classes: a : str = model_class(__UpperCAmelCase) a : str = model.generate(__UpperCAmelCase).sequences self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences) def __snake_case ( self : List[Any]): a , a , a , a : Optional[int] = self._get_input_ids_and_config() a : Any = True a : List[Any] = max_length a : Tuple = 0.8 a : int = 10 a : Union[str, Any] = 0.3 a : Any = 1 a : Optional[int] = 8 a : Any = 9 for model_class in self.all_generative_model_classes: a : int = model_class(__UpperCAmelCase) a : Any = model.generate(__UpperCAmelCase).sequences self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase) a : Optional[Any] = jit(model.generate) a : Any = jit_generate(__UpperCAmelCase).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) def __snake_case ( self : Optional[int]): a , a , a , a : str = self._get_input_ids_and_config() a : Tuple = max_length a : int = 1 a : int = 8 a : List[Any] = 9 for model_class in self.all_generative_model_classes: a : List[Any] = model_class(__UpperCAmelCase) a : Optional[Any] = model.generate(__UpperCAmelCase).sequences self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase) a : Any = jit(model.generate) a : List[str] = 
jit_generate(__UpperCAmelCase).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) def __snake_case ( self : Dict): a , a , a , a : int = self._get_input_ids_and_config() a : Optional[Any] = max_length a : Dict = 2 a : Tuple = 1 a : Optional[Any] = 8 a : Dict = 9 for model_class in self.all_generative_model_classes: a : Any = model_class(__UpperCAmelCase) a : List[str] = model.generate(__UpperCAmelCase).sequences self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase) a : Dict = jit(model.generate) a : Tuple = jit_generate(__UpperCAmelCase).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) def __snake_case ( self : Optional[int]): a , a , a , a : List[Any] = self._get_input_ids_and_config() # pad attention mask on the left a : Optional[int] = attention_mask.at[(0, 0)].set(0) a : List[str] = False a : str = max_length for model_class in self.all_generative_model_classes: a : Any = model_class(__UpperCAmelCase) a : Any = model.generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase).sequences self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase) a : List[Any] = jit(model.generate) a : Tuple = jit_generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) def __snake_case ( self : List[Any]): a , a , a , a : List[Any] = self._get_input_ids_and_config() # pad attention mask on the left a : Tuple = attention_mask.at[(0, 0)].set(0) a : Any = True a : Union[str, Any] = max_length for model_class in self.all_generative_model_classes: a : Dict = model_class(__UpperCAmelCase) a : List[Any] = model.generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase).sequences self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase) a : Dict = jit(model.generate) a : Dict = jit_generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase).sequences 
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) def __snake_case ( self : Dict): a , a , a , a : Any = self._get_input_ids_and_config() # pad attention mask on the left a : Dict = attention_mask.at[(0, 0)].set(0) a : str = 2 a : Optional[int] = max_length for model_class in self.all_generative_model_classes: a : List[str] = model_class(__UpperCAmelCase) a : int = model.generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase).sequences self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase) a : str = jit(model.generate) a : Tuple = jit_generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) @require_flax class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Any): a : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert") a : int = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only") a : Union[str, Any] = "Hello world" a : str = tokenizer(__UpperCAmelCase , return_tensors="np").input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(__UpperCAmelCase , "do_samples"): model.generate(__UpperCAmelCase , do_samples=__UpperCAmelCase) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(__UpperCAmelCase , "foo"): a : Tuple = {"foo": "bar"} model.generate(__UpperCAmelCase , **__UpperCAmelCase)
40
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


# Mapping consumed by _LazyModule: submodule name -> public names it exports.
# (Original bound this dict — and every conditional extension below — to
# throwaway names like `__A`, so the `_import_structure` referenced at the
# bottom was never defined and the optional exports were never registered.)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Names here must match the strings in _import_structure (the lazy module
    # resolves attributes by those strings); the original imported mangled
    # `SpeechaText*` names that do not exist in the submodules.
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    # Replace this module with the lazy proxy (the original assigned the proxy
    # to a dead local name, so laziness never took effect).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
273
0
# Logistic Regression from scratch

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    """Logistic sigmoid 1 / (1 + e^-z), applied elementwise."""
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    """Mean binary cross-entropy between predictions h and labels y."""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    """Log-likelihood of labels y given features x under `weights`."""
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    """Fit logistic-regression weights by batch gradient descent.

    :param alpha: learning rate
    :param x: (n_samples, n_features) feature matrix
    :param y: (n_samples,) array of 0/1 labels
    :param max_iterations: number of gradient steps
    :return: fitted weight vector theta of length n_features
    """
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


def predict_prob(x):
    """Predicted probability of class 1 for rows of x (uses the fitted `theta`)."""
    return sigmoid_function(
        np.dot(x, theta)
    )  # predicting the value of probability from the logistic regression algorithm


if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    # Plotting — kept under the __main__ guard so importing this module does
    # not train a model or open a figure (the original ran this at import).
    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (xa_min, xa_max) = (x[:, 0].min(), x[:, 0].max())
    (xb_min, xb_max) = (x[:, 1].min(), x[:, 1].max())
    (xxa, xxb) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
    grid = np.c_[xxa.ravel(), xxb.ravel()]
    probs = predict_prob(grid).reshape(xxa.shape)
    plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
41
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first ``length`` hexagonal numbers, h(n) = n * (2n - 1).

    :param length: number of terms to generate (must be a positive int)
    :raises ValueError: if ``length`` is not a positive integer

    >>> hexagonal_numbers(5)
    [0, 1, 6, 15, 28]
    """
    # Type check must come BEFORE the `<= 0` comparison: a non-numeric argument
    # would otherwise raise TypeError instead of the documented ValueError.
    # (Original also wrote isinstance(length, length) — invalid — and used a
    # parameter name that never matched the `length` the body read.)
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
273
0
'''simple docstring''' import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration lowercase : str = pytest.mark.integration lowercase : str = {"comet"} lowercase : List[Any] = importlib.util.find_spec("fairseq") is not None lowercase : str = {"code_eval"} lowercase : Optional[Any] = os.name == "nt" lowercase : Optional[Any] = {"bertscore", "frugalscore", "perplexity"} lowercase : str = importlib.util.find_spec("transformers") is not None def SCREAMING_SNAKE_CASE__ ( __A ) -> int: @wraps(__A ) def wrapper(self , __A ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('"test requires Fairseq"' ) else: test_case(self , __A ) return wrapper def SCREAMING_SNAKE_CASE__ ( __A ) -> Optional[int]: @wraps(__A ) def wrapper(self , __A ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('"test requires transformers"' ) else: test_case(self , __A ) return wrapper def SCREAMING_SNAKE_CASE__ ( __A ) -> Optional[int]: @wraps(__A ) def wrapper(self , __A ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('"test not supported on Windows"' ) else: test_case(self , __A ) return wrapper def SCREAMING_SNAKE_CASE__ ( ) -> Any: _snake_case = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) @local class __UpperCAmelCase ( parameterized.TestCase ): __lowercase = {} __lowercase = None 
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' ) def lowerCamelCase ( self , lowerCAmelCase_ ): """simple docstring""" _snake_case = '[...]' _snake_case = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , lowerCAmelCase_ ) ).module_path ) _snake_case = datasets.load.import_main_class(metric_module.__name__ , dataset=lowerCAmelCase_ ) # check parameters _snake_case = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(lowerCAmelCase_ , metric_module.__name__ ): with self.use_local_metrics(): try: _snake_case = doctest.testmod(lowerCAmelCase_ , verbose=lowerCAmelCase_ , raise_on_error=lowerCAmelCase_ ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def lowerCamelCase ( self , lowerCAmelCase_ ): """simple docstring""" _snake_case = '[...]' _snake_case = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , lowerCAmelCase_ ) ).module_path ) # run doctest with self.use_local_metrics(): _snake_case = doctest.testmod(lowerCAmelCase_ , verbose=lowerCAmelCase_ , raise_on_error=lowerCAmelCase_ ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](lowerCAmelCase_ ): yield else: yield @contextmanager def lowerCamelCase ( self ): """simple docstring""" def load_local_metric(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ): return load_metric(os.path.join('metrics' , lowerCAmelCase_ ) , 
*lowerCAmelCase_ , **lowerCAmelCase_ ) with patch('datasets.load_metric' ) as mock_load_metric: _snake_case = load_local_metric yield @classmethod def lowerCamelCase ( cls , lowerCAmelCase_ ): """simple docstring""" def wrapper(lowerCAmelCase_ ): _snake_case = contextmanager(lowerCAmelCase_ ) _snake_case = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('bleurt' ) def SCREAMING_SNAKE_CASE__ ( __A ) -> List[str]: import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('sv' , '' , '' ) # handle pytest cli flags class __UpperCAmelCase ( _lowerCamelCase ): def lowerCamelCase ( self , lowerCAmelCase_ ): """simple docstring""" assert len(input_dict['input_ids'] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('bleurt.score._create_predictor' ) as mock_create_predictor: _snake_case = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('bertscore' ) def SCREAMING_SNAKE_CASE__ ( __A ) -> Dict: import torch def bert_cos_score_idf(__A , __A , *__A , **__A ): return torch.tensor([[1.0, 1.0, 1.0]] * len(__A ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('bert_score.scorer.get_model' ), patch( 'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf: _snake_case = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('comet' ) def SCREAMING_SNAKE_CASE__ ( __A ) -> Optional[int]: def load_from_checkpoint(__A ): class __UpperCAmelCase : def lowerCamelCase ( self , lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" assert len(lowerCAmelCase_ ) == 2 _snake_case = [0.19, 0.92] return scores, sum(lowerCAmelCase_ ) / len(lowerCAmelCase_ ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock 
load_from_checkpoint which is supposed to do download a bert model with patch('comet.download_model' ) as mock_download_model: _snake_case = None with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint: _snake_case = load_from_checkpoint yield def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]: _snake_case = load_metric(os.path.join('metrics' , 'seqeval' ) ) _snake_case = 'ERROR' _snake_case = F'Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}' with pytest.raises(__A , match=re.escape(__A ) ): metric.compute(predictions=[] , references=[] , scheme=__A )
42
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , ) @pytest.mark.usefixtures('''sm_env''' ) @parameterized_class( [ { '''framework''': '''pytorch''', '''script''': '''run_glue.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.g4dn.xlarge''', '''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9}, }, { '''framework''': '''tensorflow''', '''script''': '''run_tf.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.g4dn.xlarge''', '''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9}, }, ] ) class A_ (unittest.TestCase ): def _lowercase ( self ): '''simple docstring''' if self.framework == "pytorch": subprocess.run( F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , ) assert hasattr(self , '''env''' ) def _lowercase ( self , _A=1 ): '''simple docstring''' return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , ) def _lowercase ( self , _A ): '''simple docstring''' TrainingJobAnalytics(_A 
).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.create_estimator() # run training estimator.fit() # result dataframe UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping UpperCAmelCase = ( Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert all(t <= self.results['''eval_loss'''] for t in eval_loss ) # dump tests result into json file to share in PR with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
273
0
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}


class BloomTokenizerFast(PreTrainedTokenizerFast):
    """Fast (Rust-backed) tokenizer for BLOOM models.

    Wraps a serialized ``tokenizers`` tokenizer file; there is no slow
    (Python) counterpart, hence ``slow_tokenizer_class = None``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        # The serialized pre-tokenizer may have been built with a different
        # `add_prefix_space`; if so, rebuild it to match the requested value.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """Reject pre-tokenized input unless the tokenizer adds a prefix space."""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """Reject pre-tokenized input unless the tokenizer adds a prefix space."""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into input ids, EOS-terminated per turn.

        Truncates from the left so the most recent turns are kept when the
        conversation exceeds ``model_max_length``.
        """
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
43
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    """Configuration class for BigBird models.

    Defaults reproduce a configuration similar to ``google/bigbird-roberta-base``.
    """

    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special-token ids are handled by the PretrainedConfig base class.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        # BigBird-specific attention settings.
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """ONNX input axes; multiple-choice adds a `choice` dimension."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
273
0
"""simple docstring""" from argparse import ArgumentParser from . import BaseTransformersCLICommand def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ) -> int: return DownloadCommand(args.model ,args.cache_dir ,args.force ,args.trust_remote_code ) class __A ( SCREAMING_SNAKE_CASE_ ): @staticmethod def __A ( a__ ): _lowerCAmelCase : Optional[int] = parser.add_parser("""download""" ) download_parser.add_argument( """--cache-dir""" , type=a__ , default=a__ , help="""Path to location to store the models""" ) download_parser.add_argument( """--force""" , action="""store_true""" , help="""Force the model to be download even if already in cache-dir""" ) download_parser.add_argument( """--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , ) download_parser.add_argument("""model""" , type=a__ , help="""Name of the model to download""" ) download_parser.set_defaults(func=a__ ) def __init__( self , a__ , a__ , a__ , a__ ): _lowerCAmelCase : str = model _lowerCAmelCase : List[Any] = cache _lowerCAmelCase : Tuple = force _lowerCAmelCase : List[Any] = trust_remote_code def __A ( self ): from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code ) AutoTokenizer.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
44
from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class A_ : def __init__( self , _A , _A=1_3 , _A=3_0 , _A=2 , _A=3 , _A=True , _A=True , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1_0 , _A=0.02 , _A=3 , _A=None , ): '''simple docstring''' UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = image_size UpperCAmelCase = patch_size UpperCAmelCase = num_channels UpperCAmelCase = is_training UpperCAmelCase = use_labels UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = type_sequence_label_size UpperCAmelCase = initializer_range UpperCAmelCase = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase = (image_size // patch_size) ** 2 UpperCAmelCase = num_patches + 1 def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase = None if self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase = self.get_config() return config, pixel_values, labels def _lowercase ( self ): '''simple docstring''' return ViTConfig( 
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , ) def _lowercase ( self , _A , _A , _A ): '''simple docstring''' UpperCAmelCase = TFViTModel(config=_A ) UpperCAmelCase = model(_A , training=_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. UpperCAmelCase = self.image_size // 2 UpperCAmelCase = pixel_values[:, :, :image_size, :image_size] UpperCAmelCase = model(_A , interpolate_pos_encoding=_A , training=_A ) UpperCAmelCase = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def _lowercase ( self , _A , _A , _A ): '''simple docstring''' UpperCAmelCase = self.type_sequence_label_size UpperCAmelCase = TFViTForImageClassification(_A ) UpperCAmelCase = model(_A , labels=_A , training=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. 
UpperCAmelCase = self.image_size // 2 UpperCAmelCase = pixel_values[:, :, :image_size, :image_size] UpperCAmelCase = model(_A , interpolate_pos_encoding=_A , training=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase = 1 UpperCAmelCase = TFViTForImageClassification(_A ) UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs UpperCAmelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class A_ (a_ , a_ , unittest.TestCase ): UpperCAmelCase__ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () UpperCAmelCase__ = ( {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification} if is_tf_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = TFViTModelTester(self ) UpperCAmelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 ) def _lowercase ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def _lowercase ( self ): '''simple docstring''' pass @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def _lowercase ( self ): '''simple docstring''' pass def _lowercase ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase = model_class(_A ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) UpperCAmelCase = 
model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase = model_class(_A ) UpperCAmelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase = [*signature.parameters.keys()] UpperCAmelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _A ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) @slow def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(_A ) def __SCREAMING_SNAKE_CASE ( ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class A_ (unittest.TestCase ): @cached_property def _lowercase ( self ): '''simple docstring''' return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None @slow def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ) UpperCAmelCase = self.default_image_processor UpperCAmelCase = prepare_img() UpperCAmelCase = image_processor(images=_A , return_tensors='''tf''' ) # forward pass UpperCAmelCase = model(**_A ) # verify the logits UpperCAmelCase = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , _A ) UpperCAmelCase = tf.constant([-0.27_44, 0.82_15, -0.08_36] ) 
tf.debugging.assert_near(outputs.logits[0, :3] , _A , atol=1E-4 )
273
0
"""simple docstring""" import socket def lowercase ( ) -> int: __a = socket.socket(socket.AF_INET , socket.SOCK_STREAM ) __a = socket.gethostname() __a = 12312 sock.connect((host, port) ) sock.send(b'''Hello server!''' ) with open('''Received_file''' , '''wb''' ) as out_file: print('''File opened''' ) print('''Receiving data...''' ) while True: __a = sock.recv(1024 ) if not data: break out_file.write(lowerCAmelCase__ ) print('''Successfully received the file''' ) sock.close() print('''Connection closed''' ) if __name__ == "__main__": main()
45
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    """Integration checks against reference values from the fairseq XLM-R models."""

    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
273
0
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): _SCREAMING_SNAKE_CASE = StableDiffusionSAGPipeline _SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_PARAMS _SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_BATCH_PARAMS _SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS _SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS _SCREAMING_SNAKE_CASE = False def _snake_case ( self ) -> Union[str, Any]: torch.manual_seed(0 ) lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) lowerCAmelCase = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowercase , set_alpha_to_one=lowercase , ) torch.manual_seed(0 ) lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) 
lowerCAmelCase = CLIPTextModel(lowercase ) lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCAmelCase = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _snake_case ( self , lowercase , lowercase=0 ) -> Union[str, Any]: if str(lowercase ).startswith("""mps""" ): lowerCAmelCase = torch.manual_seed(lowercase ) else: lowerCAmelCase = torch.Generator(device=lowercase ).manual_seed(lowercase ) lowerCAmelCase = { """prompt""": """.""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 1.0, """sag_scale""": 1.0, """output_type""": """numpy""", } return inputs def _snake_case ( self ) -> str: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class lowercase ( unittest.TestCase ): def _snake_case ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self ) -> Optional[Any]: lowerCAmelCase = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" ) lowerCAmelCase = sag_pipe.to(lowercase ) sag_pipe.set_progress_bar_config(disable=lowercase ) lowerCAmelCase = """.""" lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = sag_pipe( [prompt] , generator=lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" ) lowerCAmelCase = output.images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase = np.array([0.1_568, 0.1_738, 0.1_695, 0.1_693, 0.1_507, 0.1_705, 0.1_547, 0.1_751, 0.1_949] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def _snake_case ( self ) -> str: lowerCAmelCase = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) lowerCAmelCase = 
sag_pipe.to(lowercase ) sag_pipe.set_progress_bar_config(disable=lowercase ) lowerCAmelCase = """.""" lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = sag_pipe( [prompt] , generator=lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" ) lowerCAmelCase = output.images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase = np.array([0.3_459, 0.2_876, 0.2_537, 0.3_002, 0.2_671, 0.2_160, 0.3_026, 0.2_262, 0.2_371] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def _snake_case ( self ) -> Tuple: lowerCAmelCase = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) lowerCAmelCase = sag_pipe.to(lowercase ) sag_pipe.set_progress_bar_config(disable=lowercase ) lowerCAmelCase = """.""" lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = sag_pipe( [prompt] , width=768 , height=512 , generator=lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , ) lowerCAmelCase = output.images assert image.shape == (1, 512, 768, 3)
46
import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") __A : Optional[int] = logging.getLogger(__name__) @dataclass class A_ : UpperCAmelCase__ = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) UpperCAmelCase__ = field( default=a_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCAmelCase__ = field( default=a_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCAmelCase__ = field( default=a_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) UpperCAmelCase__ = field( default=a_ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , ) UpperCAmelCase__ = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCAmelCase__ = field( default=a_ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) @dataclass class A_ : UpperCAmelCase__ = field(default=a_ , metadata={'''help''': '''The input training data 
file (a text file).'''} ) UpperCAmelCase__ = field( default=a_ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , ) UpperCAmelCase__ = field( default=a_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) UpperCAmelCase__ = field( default=a_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) UpperCAmelCase__ = field( default=a_ , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. If passed, sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCAmelCase__ = field( default=a_ , metadata={ '''help''': ( '''Whether to pad all samples to the maximum sentence length. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch. More ''' '''efficient on GPU but very bad for TPU.''' ) } , ) UpperCAmelCase__ = field( default=a_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCAmelCase__ = field( default=a_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def _lowercase ( self ): '''simple docstring''' if self.train_file is not None: UpperCAmelCase = self.train_file.split('''.''' )[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: UpperCAmelCase = self.validation_file.split('''.''' )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." 
@dataclass class A_ : UpperCAmelCase__ = 42 UpperCAmelCase__ = True UpperCAmelCase__ = None UpperCAmelCase__ = None def __call__( self , _A ): '''simple docstring''' UpperCAmelCase = '''label''' if '''label''' in features[0].keys() else '''labels''' UpperCAmelCase = [feature.pop(_A ) for feature in features] UpperCAmelCase = len(_A ) UpperCAmelCase = len(features[0]['''input_ids'''] ) UpperCAmelCase = [ [{k: v[i] for k, v in feature.items()} for i in range(_A )] for feature in features ] UpperCAmelCase = list(chain(*_A ) ) UpperCAmelCase = self.tokenizer.pad( _A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , ) # Un-flatten UpperCAmelCase = {k: v.view(_A , _A , -1 ) for k, v in batch.items()} # Add back labels UpperCAmelCase = torch.tensor(_A , dtype=torch.intaa ) return batch def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]: '''simple docstring''' UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. 
send_example_telemetry('''run_swag''' , UpperCamelCase__ , UpperCamelCase__ ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(UpperCamelCase__ ) datasets.utils.logging.set_verbosity(UpperCamelCase__ ) transformers.utils.logging.set_verbosity(UpperCamelCase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: UpperCAmelCase = {} if data_args.train_file is not None: UpperCAmelCase = data_args.train_file if data_args.validation_file is not None: UpperCAmelCase = data_args.validation_file UpperCAmelCase = data_args.train_file.split('''.''' )[-1] UpperCAmelCase = load_dataset( UpperCamelCase__ , data_files=UpperCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. UpperCAmelCase = load_dataset( '''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) UpperCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) UpperCAmelCase = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. UpperCAmelCase = [F"""ending{i}""" for i in range(4 )] UpperCAmelCase = '''sent1''' UpperCAmelCase = '''sent2''' if data_args.max_seq_length is None: UpperCAmelCase = tokenizer.model_max_length if max_seq_length > 1024: logger.warning( '''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value''' ''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can''' ''' override this default with `--block_size xxx`.''' ) UpperCAmelCase = 1024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) UpperCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. 
def preprocess_function(UpperCamelCase__ ): UpperCAmelCase = [[context] * 4 for context in examples[context_name]] UpperCAmelCase = examples[question_header_name] UpperCAmelCase = [ [F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(UpperCamelCase__ ) ] # Flatten out UpperCAmelCase = list(chain(*UpperCamelCase__ ) ) UpperCAmelCase = list(chain(*UpperCamelCase__ ) ) # Tokenize UpperCAmelCase = tokenizer( UpperCamelCase__ , UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(UpperCamelCase__ ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError('''--do_train requires a train dataset''' ) UpperCAmelCase = raw_datasets['''train'''] if data_args.max_train_samples is not None: UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_train_samples ) UpperCAmelCase = train_dataset.select(range(UpperCamelCase__ ) ) with training_args.main_process_first(desc='''train dataset map pre-processing''' ): UpperCAmelCase = train_dataset.map( UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError('''--do_eval requires a validation dataset''' ) UpperCAmelCase = raw_datasets['''validation'''] if data_args.max_eval_samples is not None: UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_eval_samples ) UpperCAmelCase = eval_dataset.select(range(UpperCamelCase__ ) ) with training_args.main_process_first(desc='''validation dataset map pre-processing''' ): UpperCAmelCase = eval_dataset.map( UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data 
collator UpperCAmelCase = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=UpperCamelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(UpperCamelCase__ ): UpperCAmelCase , UpperCAmelCase = eval_predictions UpperCAmelCase = np.argmax(UpperCamelCase__ , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer UpperCAmelCase = Trainer( model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , ) # Training if training_args.do_train: UpperCAmelCase = None if training_args.resume_from_checkpoint is not None: UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCAmelCase = last_checkpoint UpperCAmelCase = trainer.train(resume_from_checkpoint=UpperCamelCase__ ) trainer.save_model() # Saves the tokenizer too for easy upload UpperCAmelCase = train_result.metrics UpperCAmelCase = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ ) ) UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) ) trainer.log_metrics('''train''' , UpperCamelCase__ ) trainer.save_metrics('''train''' , UpperCamelCase__ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) UpperCAmelCase = trainer.evaluate() UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ ) UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) ) trainer.log_metrics('''eval''' , UpperCamelCase__ ) trainer.save_metrics('''eval''' , UpperCamelCase__ ) UpperCAmelCase = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''multiple-choice''', 
'''dataset_tags''': '''swag''', '''dataset_args''': '''regular''', '''dataset''': '''SWAG''', '''language''': '''en''', } if training_args.push_to_hub: trainer.push_to_hub(**UpperCamelCase__ ) else: trainer.create_model_card(**UpperCamelCase__ ) def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> int: '''simple docstring''' main() if __name__ == "__main__": main()
273
0
'''simple docstring''' import unittest import numpy as np def _lowerCAmelCase ( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray | None = None , ) -> np.ndarray: """simple docstring""" _SCREAMING_SNAKE_CASE =np.shape(_UpperCamelCase ) _SCREAMING_SNAKE_CASE =np.shape(_UpperCamelCase ) _SCREAMING_SNAKE_CASE =np.shape(_UpperCamelCase ) if shape_a[0] != shape_b[0]: _SCREAMING_SNAKE_CASE =( 'Expected the same number of rows for A and B. ' f"Instead found A of size {shape_a} and B of size {shape_b}" ) raise ValueError(_UpperCamelCase ) if shape_b[1] != shape_c[1]: _SCREAMING_SNAKE_CASE =( 'Expected the same number of columns for B and C. ' f"Instead found B of size {shape_b} and C of size {shape_c}" ) raise ValueError(_UpperCamelCase ) _SCREAMING_SNAKE_CASE =pseudo_inv if a_inv is None: try: _SCREAMING_SNAKE_CASE =np.linalg.inv(_UpperCamelCase ) except np.linalg.LinAlgError: raise ValueError( 'Input matrix A is not invertible. Cannot compute Schur complement.' 
) return mat_c - mat_b.T @ a_inv @ mat_b class A__ ( unittest.TestCase ): def A ( self : Dict ) -> None: '''simple docstring''' _SCREAMING_SNAKE_CASE =np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _SCREAMING_SNAKE_CASE =np.array([[0, 3], [3, 0], [2, 3]] ) _SCREAMING_SNAKE_CASE =np.array([[2, 1], [6, 3]] ) _SCREAMING_SNAKE_CASE =schur_complement(_a , _a , _a ) _SCREAMING_SNAKE_CASE =np.block([[a, b], [b.T, c]] ) _SCREAMING_SNAKE_CASE =np.linalg.det(_a ) _SCREAMING_SNAKE_CASE =np.linalg.det(_a ) _SCREAMING_SNAKE_CASE =np.linalg.det(_a ) self.assertAlmostEqual(_a , det_a * det_s ) def A ( self : str ) -> None: '''simple docstring''' _SCREAMING_SNAKE_CASE =np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _SCREAMING_SNAKE_CASE =np.array([[0, 3], [3, 0], [2, 3]] ) _SCREAMING_SNAKE_CASE =np.array([[2, 1], [6, 3]] ) with self.assertRaises(_a ): schur_complement(_a , _a , _a ) def A ( self : List[Any] ) -> None: '''simple docstring''' _SCREAMING_SNAKE_CASE =np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _SCREAMING_SNAKE_CASE =np.array([[0, 3], [3, 0], [2, 3]] ) _SCREAMING_SNAKE_CASE =np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(_a ): schur_complement(_a , _a , _a ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
47
from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class A_ : UpperCAmelCase__ = MBartConfig UpperCAmelCase__ = {} UpperCAmelCase__ = '''gelu''' def __init__( self , _A , _A=1_3 , _A=7 , _A=True , _A=False , _A=9_9 , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A=0.1 , _A=0.1 , _A=2_0 , _A=2 , _A=1 , _A=0 , ): '''simple docstring''' UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = seq_length UpperCAmelCase = is_training UpperCAmelCase = use_labels UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = max_position_embeddings UpperCAmelCase = eos_token_id UpperCAmelCase = pad_token_id UpperCAmelCase = bos_token_id def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , 
decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) UpperCAmelCase = prepare_mbart_inputs_dict(_A , _A , _A ) return config, inputs_dict def _lowercase ( self , _A , _A ): '''simple docstring''' UpperCAmelCase = TFMBartModel(config=_A ).get_decoder() UpperCAmelCase = inputs_dict['''input_ids'''] UpperCAmelCase = input_ids[:1, :] UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :] UpperCAmelCase = inputs_dict['''head_mask'''] UpperCAmelCase = 1 # first forward pass UpperCAmelCase = model(_A , attention_mask=_A , head_mask=_A , use_cache=_A ) UpperCAmelCase , UpperCAmelCase = outputs.to_tuple() UpperCAmelCase = past_key_values[1] def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , ) -> List[str]: '''simple docstring''' if attention_mask is None: UpperCAmelCase = tf.cast(tf.math.not_equal(UpperCamelCase__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCAmelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, 
"decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class A_ (a_ , a_ , unittest.TestCase ): UpperCAmelCase__ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () UpperCAmelCase__ = (TFMBartForConditionalGeneration,) if is_tf_available() else () UpperCAmelCase__ = ( { '''conversational''': TFMBartForConditionalGeneration, '''feature-extraction''': TFMBartModel, '''summarization''': TFMBartForConditionalGeneration, '''text2text-generation''': TFMBartForConditionalGeneration, '''translation''': TFMBartForConditionalGeneration, } if is_tf_available() else {} ) UpperCAmelCase__ = True UpperCAmelCase__ = False UpperCAmelCase__ = False def _lowercase ( self , _A , _A , _A , _A , _A ): '''simple docstring''' if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = TFMBartModelTester(self ) UpperCAmelCase = ConfigTester(self , config_class=_A ) def _lowercase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_A ) @require_sentencepiece @require_tokenizers @require_tf class A_ (unittest.TestCase ): UpperCAmelCase__ = [ ''' UN Chief Says There Is No Military Solution in Syria''', ] UpperCAmelCase__ = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', ] UpperCAmelCase__ = '''facebook/mbart-large-en-ro''' @cached_property def _lowercase ( self ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def _lowercase ( self , **_A ): '''simple docstring''' UpperCAmelCase = self.translate_src_text(**_A ) self.assertListEqual(self.expected_text , _A ) def _lowercase ( self , **_A ): '''simple docstring''' UpperCAmelCase = self.tokenizer(self.src_text , **_A , return_tensors='''tf''' ) UpperCAmelCase = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) UpperCAmelCase = self.tokenizer.batch_decode(_A , skip_special_tokens=_A ) return generated_words @slow def _lowercase ( self ): '''simple docstring''' self._assert_generated_batch_equal_expected()
273
0
"""XNLI metric: plain accuracy over predicted vs. reference labels."""
import datasets

# Restored three distinct module-level names: the decorator and ``_info`` below
# reference _DESCRIPTION/_CITATION/_KWARGS_DESCRIPTION, but the mangled source
# assigned all three strings to the same name (only the last survived).
_CITATION = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'

_DESCRIPTION = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'

_KWARGS_DESCRIPTION = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'


def simple_accuracy(preds, labels):
    """Fraction of positions where ``preds`` equals ``labels`` (numpy arrays)."""
    # Named ``A`` in the mangled source while ``_compute`` called
    # ``simple_accuracy`` — restored the name the call site uses.
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCamelCase__(datasets.Metric):
    """XNLI accuracy metric (``datasets.Metric`` subclass)."""

    def _info(self):
        # ``datasets.Metric`` dispatches to ``_info``; the mangled source named
        # both hooks ``_lowercase`` (the second silently shadowed the first).
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        # ``datasets.Metric.compute`` calls ``_compute`` with the features
        # declared in ``_info``.
        return {"accuracy": simple_accuracy(predictions, references)}
48
import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class A_ : def __init__( self , _A , _A=1_4 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=True , _A=9_9 , _A=3_2 , _A=5 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=1_6 , _A=2 , _A=0.02 , _A=3 , _A=4 , _A=None , ): '''simple docstring''' UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = seq_length UpperCAmelCase = is_training UpperCAmelCase = use_token_type_ids UpperCAmelCase = use_input_mask UpperCAmelCase = use_labels UpperCAmelCase = use_mc_token_ids UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = max_position_embeddings UpperCAmelCase = type_vocab_size UpperCAmelCase = type_sequence_label_size UpperCAmelCase = initializer_range UpperCAmelCase = num_labels UpperCAmelCase = num_choices UpperCAmelCase = scope UpperCAmelCase = self.vocab_size - 1 def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase = None if self.use_input_mask: UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase = None if self.use_token_type_ids: UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , 
self.type_vocab_size ) UpperCAmelCase = None if self.use_mc_token_ids: UpperCAmelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length ) UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = None if self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase = self.get_config() UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def _lowercase ( self ): '''simple docstring''' return CTRLConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) def _lowercase ( self , _A , _A , _A , _A , _A , *_A ): '''simple docstring''' UpperCAmelCase = CTRLModel(config=_A ) model.to(_A ) model.eval() model(_A , token_type_ids=_A , head_mask=_A ) model(_A , token_type_ids=_A ) UpperCAmelCase = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) , config.n_layer ) def _lowercase ( self , _A , _A , _A , _A , _A , *_A ): '''simple docstring''' UpperCAmelCase = CTRLLMHeadModel(_A ) model.to(_A ) model.eval() UpperCAmelCase = model(_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , 
( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) = config_and_inputs UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask} return config, inputs_dict def _lowercase ( self , _A , _A , _A , _A , *_A ): '''simple docstring''' UpperCAmelCase = self.num_labels UpperCAmelCase = CTRLForSequenceClassification(_A ) model.to(_A ) model.eval() UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase = model(_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) @require_torch class A_ (a_ , a_ , a_ , unittest.TestCase ): UpperCAmelCase__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () UpperCAmelCase__ = (CTRLLMHeadModel,) if is_torch_available() else () UpperCAmelCase__ = ( { '''feature-extraction''': CTRLModel, '''text-classification''': CTRLForSequenceClassification, '''text-generation''': CTRLLMHeadModel, '''zero-shot''': CTRLForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase__ = True UpperCAmelCase__ = False UpperCAmelCase__ = False def _lowercase ( self , _A , _A , _A , _A , _A ): '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. 
return True return False def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = CTRLModelTester(self ) UpperCAmelCase = ConfigTester(self , config_class=_A , n_embd=3_7 ) def _lowercase ( self ): '''simple docstring''' super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def _lowercase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*_A ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*_A ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def _lowercase ( self ): '''simple docstring''' pass @slow def _lowercase ( self ): '''simple docstring''' for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase = CTRLModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :) def _lowercase ( self ): '''simple docstring''' pass @require_torch class A_ (unittest.TestCase ): def _lowercase ( self ): '''simple docstring''' super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @slow def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = CTRLLMHeadModel.from_pretrained('''ctrl''' ) model.to(_A ) UpperCAmelCase = torch.tensor( [[1_1_8_5_9, 0, 1_6_1_1, 8]] , dtype=torch.long , device=_A ) # Legal the president is UpperCAmelCase = [ 1_1_8_5_9, 0, 1_6_1_1, 8, 5, 1_5_0, 2_6_4_4_9, 2, 1_9, 3_4_8, 4_6_9, 3, 2_5_9_5, 4_8, 2_0_7_4_0, 2_4_6_5_3_3, 2_4_6_5_3_3, 1_9, 3_0, 5, ] # Legal the president is a good guy and I don't want to lose my job. 
\n \n I have a UpperCAmelCase = model.generate(_A , do_sample=_A ) self.assertListEqual(output_ids[0].tolist() , _A )
273
0
import math


def solution(n: int = 100) -> int:
    """Project Euler problem 6.

    Return the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers.

    :param n: upper bound of the range 1..n (inclusive), defaults to 100.
    :return: square_of_sum - sum_of_squares.
    """
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    # math.pow returns a float; the sum of 1..n is small enough that the
    # conversion back to int is exact for the intended input sizes.
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
49
import cv2
import numpy as np


class HarrisCorner:
    """Harris corner detector over a grayscale image.

    https://en.wikipedia.org/wiki/Harris_corner_detector
    """

    def __init__(self, k: float, window_size: int):
        """
        :param k: Harris sensitivity factor; only the conventional values
            0.04 and 0.06 are accepted.
        :param window_size: side length of the square neighbourhood summed
            around each pixel.
        :raises ValueError: if ``k`` is not 0.04 or 0.06.
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        """Detect corners in the image at ``img_path``.

        :return: tuple of (RGB image with detected corners painted red,
            list of ``[x, y, response]`` entries for each corner).
        """
        img = cv2.imread(img_path, 0)  # load as grayscale
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        # Use the sensitivity factor supplied to the constructor (the
        # original hard-coded 0.04 here, silently ignoring self.k).
        k = self.k
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value: threshold on the corner response
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
273
0
# Usage:
# ./gen-card-facebook-wmt19.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang):
    """Generate a README.md model card for one facebook/wmt19-{src}-{tgt} FSMT model.

    :param model_card_dir: directory the README.md is written to (created if missing).
    :param src_lang: source language code ("en", "ru" or "de").
    :param tgt_lang: target language code ("en", "ru" or "de").
    """
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---

# FSMT

## Model description

This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.

For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).

The abbreviation FSMT stands for FairSeqMachineTranslation

All four models are available:

* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)

## Intended uses & limitations

#### How to use

```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}

```

#### Limitations and bias

- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)

## Training data

Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).

## Eval results

pair   | fairseq | transformers
-------|---------|----------
{pair}  | {scores[pair][0]} | {scores[pair][1]}

The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking

The score was calculated using this code:

```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.

## Data Sources

- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)


### BibTeX entry and citation info

```bibtex
@inproceedings{{...,
  year={{2020}},
  title={{Facebook FAIR's WMT19 News Translation Task Submission}},
  author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
  booktitle={{Proc. of WMT}},
}}
```


## TODO

- port model ensemble (fairseq uses 4 model checkpoints)

"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# Guarded so importing this module does not rewrite the model cards.
if __name__ == "__main__":
    # make sure we are under the root of the project
    repo_dir = Path(__file__).resolve().parent.parent.parent
    model_cards_dir = repo_dir / "model_cards"

    for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
        base, src_lang, tgt_lang = model_name.split("-")
        model_card_dir = model_cards_dir / "facebook" / model_name
        write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
50
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    """Return the raw bytes of the video behind an Instagram/IGTV ``url``.

    The downloadgram.net API is queried to resolve the direct source URL,
    then that source is fetched.  Both calls are blocking network I/O.
    """
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
273
0
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

# Smallest candidate considered when searching for a primitive root.
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    """Return a (probable) primitive root modulo ``p_val``.

    Candidates failing the two quick order checks are rejected and a new
    random candidate is drawn.
    """
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """Generate an ElGamal key pair of ``key_size`` bits.

    :return: ``((key_size, e_1, e_2, p), (key_size, d))`` — public then
        private key.
    """
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    """Write ``<name>_pubkey.txt`` and ``<name>_privkey.txt``.

    Aborts the program if either file already exists to avoid clobbering
    existing keys.
    """
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    """Generate the demo 2048-bit ElGamal key pair."""
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
51
from __future__ import annotations from collections.abc import Callable def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 100 , ) -> float: '''simple docstring''' UpperCAmelCase = x_start UpperCAmelCase = fnc(UpperCamelCase__ ) UpperCAmelCase = 0.0 for _ in range(UpperCamelCase__ ): # Approximates small segments of curve as linear and solve # for trapezoidal area UpperCAmelCase = (x_end - x_start) / steps + xa UpperCAmelCase = fnc(UpperCamelCase__ ) area += abs(fxa + fxa ) * (xa - xa) / 2 # Increment step UpperCAmelCase = xa UpperCAmelCase = fxa return area if __name__ == "__main__": def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> str: '''simple docstring''' return x**3 + x**2 print("f(x) = x^3 + x^2") print("The area between the curve, x = -5, x = 5 and the x axis is:") __A : List[Any] = 10 while i <= 100_000: print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}') i *= 10
273
0
class Node:
    """A single element of a doubly linked list."""

    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    """Forward iterator yielding the data of each node in turn."""

    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    """Doubly linked list supporting insertion, lookup and deletion."""

    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        """Return the first value, or None when the list is empty."""
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        """Return the last value, or None when the list is empty."""
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        """Make ``node`` the new head (also the tail if the list is empty)."""
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        """Append ``node`` at the end of the list."""
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value) -> None:
        """Append ``value`` to the list."""
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        """Splice ``node_to_insert`` directly before ``node``."""
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        """Splice ``node_to_insert`` directly after ``node``."""
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value) -> None:
        """Insert ``value`` at 1-based ``position``; append when out of range."""
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item):
        """Return the first node holding ``item``; raise if absent."""
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value) -> None:
        """Remove the first node holding ``value`` (raises if not present)."""
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        """Unlink ``node`` from its neighbours and clear its own pointers."""
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    # Placeholder kept from the original module; intentionally a no-op.
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
52
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}


class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) WordPiece tokenizer for SqueezeBERT."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer when its serialized state disagrees
        # with the options requested here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]`` input ids."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0s for the first sequence, 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
273
0
"""Project Euler problem 94: almost equilateral triangles.

Sums the perimeters of all Heronian triangles with two equal sides and the
third differing by one unit, whose perimeter does not exceed the limit.
"""


def solution(max_perimeter: int = 10**9) -> int:
    """Return the sum of perimeters of all almost-equilateral Heronian
    triangles with perimeter <= ``max_perimeter``.

    The side lengths follow a known recurrence, so perimeters grow
    geometrically and the loop terminates quickly even for 10**9.
    """
    prev_value = 1
    value = 2
    i = 0
    perimeter = 0
    perimeters_sum = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        # Alternate between the (a, a, a+1) and (a, a, a-1) families.
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
53
import argparse
import re

from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints

from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging

logging.set_verbosity_info()


# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm",  # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}


def rename_keys(s_dict):
    """Rename flax/t5x parameter keys in-place to the HF naming scheme.

    :param s_dict: flat dict of parameter-path -> array.
    :return: the same dict with renamed keys.
    """
    # 1. Map layers_{x} to the HF block/{x}/layer structure.
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"

        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    # The relative-attention bias needs transposing for the HF layout.
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked expert
    # weights into one entry per expert.
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'expert_{idx}/')}")

            s_dict.pop(key)

    return s_dict


GIN_TO_CONFIG_MAPPING = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}


def convert_gin_to_config(gin_file, num_experts):
    """Build a SwitchTransformersConfig from a google-style gin config file."""
    # Convert a google style config to the hugging face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config


def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    """Convert a t5x Switch Transformers checkpoint to a PyTorch model dir.

    Either ``gin_file`` or ``config_file`` must identify the architecture.
    """
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
273
0
"""simple docstring""" import argparse import shutil from pathlib import Path from tqdm import tqdm from transformers import AutoTokenizer def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1024 ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = [], [] __SCREAMING_SNAKE_CASE = list(zip(lowerCAmelCase_ , lowerCAmelCase_ ) ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sorted_examples[0] def is_too_big(lowerCAmelCase_ ): return tok(lowerCAmelCase_ , return_tensors="pt" ).input_ids.shape[1] > max_tokens for src, tgt in tqdm(sorted_examples[1:] ): __SCREAMING_SNAKE_CASE = new_src + " " + src __SCREAMING_SNAKE_CASE = new_tgt + " " + tgt if is_too_big(lowerCAmelCase_ ) or is_too_big(lowerCAmelCase_ ): # cant fit, finalize example finished_src.append(lowerCAmelCase_ ) finished_tgt.append(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = src, tgt else: # can fit, keep adding __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = cand_src, cand_tgt # cleanup if new_src: assert new_tgt finished_src.append(lowerCAmelCase_ ) finished_tgt.append(lowerCAmelCase_ ) return finished_src, finished_tgt def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = Path(lowerCAmelCase_ ) save_path.mkdir(exist_ok=lowerCAmelCase_ ) for split in ["train"]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = data_dir / f"""{split}.source""", data_dir / f"""{split}.target""" __SCREAMING_SNAKE_CASE = [x.rstrip() for x in Path(lowerCAmelCase_ ).open().readlines()] __SCREAMING_SNAKE_CASE = [x.rstrip() for x in Path(lowerCAmelCase_ ).open().readlines()] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = pack_examples(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) print(f"""packed {split} split from {len(lowerCAmelCase_ )} examples -> {len(lowerCAmelCase_ )}.""" ) Path(save_path / f"""{split}.source""" 
).open("w" ).write("\n".join(lowerCAmelCase_ ) ) Path(save_path / f"""{split}.target""" ).open("w" ).write("\n".join(lowerCAmelCase_ ) ) for split in ["val", "test"]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = data_dir / f"""{split}.source""", data_dir / f"""{split}.target""" shutil.copyfile(lowerCAmelCase_ , save_path / f"""{split}.source""" ) shutil.copyfile(lowerCAmelCase_ , save_path / f"""{split}.target""" ) def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument("--tok_name" , type=lowerCAmelCase_ , help="like facebook/bart-large-cnn,t5-base, etc." ) parser.add_argument("--max_seq_len" , type=lowerCAmelCase_ , default=128 ) parser.add_argument("--data_dir" , type=lowerCAmelCase_ ) parser.add_argument("--save_path" , type=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = parser.parse_args() __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(args.tok_name ) return pack_data_dir(lowerCAmelCase_ , Path(args.data_dir ) , args.max_seq_len , args.save_path ) if __name__ == "__main__": packer_cli()
54
import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class A_ : def _lowercase ( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) UpperCAmelCase = UNetaDConditionModel( sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) UpperCAmelCase = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , ) torch.manual_seed(0 ) UpperCAmelCase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def _lowercase ( self ): '''simple docstring''' torch.manual_seed(0 ) 
UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) UpperCAmelCase = UNetaDConditionModel( sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='''gelu''' , time_embedding_dim=3_2 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) UpperCAmelCase = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , ) torch.manual_seed(0 ) UpperCAmelCase = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , ) torch.manual_seed(0 ) UpperCAmelCase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.get_dummy_components() UpperCAmelCase = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) 
UpperCAmelCase = self.get_dummy_inputs(_A ) UpperCAmelCase = inputs['''prompt'''] UpperCAmelCase = inputs['''generator'''] UpperCAmelCase = inputs['''num_inference_steps'''] UpperCAmelCase = inputs['''output_type'''] if "image" in inputs: UpperCAmelCase = inputs['''image'''] else: UpperCAmelCase = None if "mask_image" in inputs: UpperCAmelCase = inputs['''mask_image'''] else: UpperCAmelCase = None if "original_image" in inputs: UpperCAmelCase = inputs['''original_image'''] else: UpperCAmelCase = None UpperCAmelCase , UpperCAmelCase = pipe.encode_prompt(_A ) # inputs with prompt converted to embeddings UpperCAmelCase = { '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''': num_inference_steps, '''output_type''': output_type, } if image is not None: UpperCAmelCase = image if mask_image is not None: UpperCAmelCase = mask_image if original_image is not None: UpperCAmelCase = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(_A , _A , _A ) UpperCAmelCase = pipe(**_A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_A ) UpperCAmelCase = self.pipeline_class.from_pretrained(_A ) pipe_loaded.to(_A ) pipe_loaded.set_progress_bar_config(disable=_A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(_A , _A ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , ) UpperCAmelCase = self.get_dummy_inputs(_A ) UpperCAmelCase = inputs['''generator'''] UpperCAmelCase = inputs['''num_inference_steps'''] UpperCAmelCase = inputs['''output_type'''] # inputs with prompt converted to embeddings UpperCAmelCase = { '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''': 
num_inference_steps, '''output_type''': output_type, } if image is not None: UpperCAmelCase = image if mask_image is not None: UpperCAmelCase = mask_image if original_image is not None: UpperCAmelCase = original_image UpperCAmelCase = pipe_loaded(**_A )[0] UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max() self.assertLess(_A , 1E-4 ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.get_dummy_components() UpperCAmelCase = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase = self.get_dummy_inputs(_A ) UpperCAmelCase = pipe(**_A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_A ) UpperCAmelCase = self.pipeline_class.from_pretrained(_A ) pipe_loaded.to(_A ) pipe_loaded.set_progress_bar_config(disable=_A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests UpperCAmelCase = self.get_dummy_inputs(_A ) UpperCAmelCase = pipe_loaded(**_A )[0] UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max() self.assertLess(_A , 1E-4 )
273
0
'''simple docstring''' import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class snake_case ( ctypes.Structure ): """simple docstring""" _lowerCamelCase = [("size", ctypes.c_int), ("visible", ctypes.c_byte)] def __snake_case ( ): if os.name == "nt": lowerCamelCase_ = CursorInfo() lowerCamelCase_ = ctypes.windll.kernelaa.GetStdHandle(-11 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(UpperCAmelCase_ , ctypes.byref(UpperCAmelCase_ ) ) lowerCamelCase_ = False ctypes.windll.kernelaa.SetConsoleCursorInfo(UpperCAmelCase_ , ctypes.byref(UpperCAmelCase_ ) ) elif os.name == "posix": sys.stdout.write("\033[?25l" ) sys.stdout.flush() def __snake_case ( ): if os.name == "nt": lowerCamelCase_ = CursorInfo() lowerCamelCase_ = ctypes.windll.kernelaa.GetStdHandle(-11 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(UpperCAmelCase_ , ctypes.byref(UpperCAmelCase_ ) ) lowerCamelCase_ = True ctypes.windll.kernelaa.SetConsoleCursorInfo(UpperCAmelCase_ , ctypes.byref(UpperCAmelCase_ ) ) elif os.name == "posix": sys.stdout.write("\033[?25h" ) sys.stdout.flush() @contextmanager def __snake_case ( ): try: hide_cursor() yield finally: show_cursor()
55
from __future__ import annotations from collections import namedtuple def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> tuple: '''simple docstring''' UpperCAmelCase = namedtuple('''result''' , '''name value''' ) if (voltage, current, power).count(0 ) != 1: raise ValueError('''Only one argument must be 0''' ) elif power < 0: raise ValueError( '''Power cannot be negative in any electrical/electronics system''' ) elif voltage == 0: return result('''voltage''' , power / current ) elif current == 0: return result('''current''' , power / voltage ) elif power == 0: return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
273
0
'''simple docstring''' import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a : List[str] = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class a ( _lowerCamelCase , unittest.TestCase ): snake_case_ = AlbertTokenizer snake_case_ = AlbertTokenizerFast snake_case_ = True snake_case_ = True snake_case_ = True def A_ ( self : int ): super().setUp() # We have a SentencePiece fixture for testing snake_case_ = AlbertTokenizer(lowercase_ ) tokenizer.save_pretrained(self.tmpdirname ) def A_ ( self : Dict , lowercase_ : Optional[int] ): snake_case_ = '''this is a test''' snake_case_ = '''this is a test''' return input_text, output_text def A_ ( self : Union[str, Any] ): snake_case_ = '''<pad>''' snake_case_ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ ) def A_ ( self : Union[str, Any] ): snake_case_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''▁eloquent''' ) self.assertEqual(len(lowercase_ ) , 3_0000 ) def A_ ( self : Union[str, Any] ): self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 ) def A_ ( self : List[str] ): if not self.test_rust_tokenizer: return snake_case_ = self.get_tokenizer() snake_case_ = self.get_rust_tokenizer() snake_case_ = '''I was born in 92000, and this is falsé.''' snake_case_ = tokenizer.tokenize(lowercase_ ) snake_case_ = rust_tokenizer.tokenize(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) snake_case_ = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) snake_case_ = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) 
self.assertListEqual(lowercase_ , lowercase_ ) snake_case_ = self.get_rust_tokenizer() snake_case_ = tokenizer.encode(lowercase_ ) snake_case_ = rust_tokenizer.encode(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) def A_ ( self : int ): snake_case_ = AlbertTokenizer(lowercase_ , keep_accents=lowercase_ ) snake_case_ = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowercase_ , ['''▁this''', '''▁is''', '''▁a''', '''▁test'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , [48, 25, 21, 1289] ) snake_case_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowercase_ , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.'''] ) snake_case_ = tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual(lowercase_ , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] ) snake_case_ = tokenizer.convert_ids_to_tokens(lowercase_ ) self.assertListEqual( lowercase_ , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.'''] , ) def A_ ( self : Optional[Any] ): snake_case_ = AlbertTokenizer(lowercase_ ) snake_case_ = tokenizer.encode('''sequence builders''' ) snake_case_ = tokenizer.encode('''multi-sequence build''' ) snake_case_ = tokenizer.build_inputs_with_special_tokens(lowercase_ ) snake_case_ = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def A_ ( self : Optional[int] ): # fmt: off snake_case_ = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase_ , model_name='''albert-base-v2''' , revision='''6b6560eaf5ff2e250b00c50f380c5389a9c2d82e''' , )
56
"""Lazy import structure for the RoFormer model family.

NOTE(review): every assignment target in the incoming code had been mangled to
``__A``, so the import-structure dict was repeatedly overwritten instead of
extended, and the ``_LazyModule`` was never installed into ``sys.modules``.
Targets are restored below to the standard transformers lazy-init pattern.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Submodule name -> public names it exposes; extended below per available backend.
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Real imports for static type checkers only; at runtime the lazy module handles them.
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module so attribute access triggers the real imports on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
273
0
"""CNN/DailyMail summarization utilities: dataset wrapper and story pre-processing.

NOTE(review): the incoming code had a SyntaxError (duplicate ``__init__``
parameter names), an undefined base class, all helper functions shadowing a
single obfuscated name, and lost ``self.``/indexed assignment targets; all
restored below to the documented call sites.
"""
import os
from collections import deque

import torch
from torch.utils.data import Dataset


class CNNDMDataset(Dataset):
    """Abstracts a directory of CNN/DM story files as a torch Dataset.

    Each item is (document_name, story_lines, summary_lines).
    """

    def __init__(self, path="", prefix="train"):
        # NOTE(review): `prefix` is accepted but unused, mirroring the original signature.
        assert os.path.isdir(path)
        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Returns the number of documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
        story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    """Split a raw story into (story_lines, summary_lines) at the "@highlight" markers."""
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None.
            return story_lines, []

    # gather summary lines: everything after the first "@highlight" that is not a marker
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))
    return story_lines, summary_lines


def _add_missing_period(line):
    """Append a period to lines that do not already end in a terminal token."""
    END_TOKENS = [".", "!", "?", "...", "'", "`", "\"", "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def fit_to_block_size(sequence, block_size, pad_token_id):
    """Truncate to `block_size`, or pad in place with `pad_token_id` up to it."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Return a mask tensor of ones with zeros wherever `sequence` equals the pad id."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Tokenize story and summary lines and flatten each into a single id list."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Alternate 0/1 segment ids per sentence, incrementing at each separator token."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
57
"""Convert a YOSO research checkpoint into a transformers YosoForMaskedLM checkpoint.

NOTE(review): in the incoming code all three functions shared one obfuscated name
(shadowing each other) and the state-dict assignment targets inside the helper
were lost; restored below to match the internal call sites
(`rename_key`, `convert_checkpoint_helper`, `convert_yoso_checkpoint`).
"""
import argparse

import torch

from transformers import YosoConfig, YosoForMaskedLM


def rename_key(orig_key):
    """Map a research-repo parameter name onto the transformers naming scheme."""
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """Rename all keys in place, dropping pooler/sen_class heads, and add derived tensors."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            # NOTE(review): target reconstructed — the renamed key receives the value.
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    # position_ids offset by 2, matching the embedding padding convention.
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    """Load the research checkpoint, convert its state dict, and save a HF model."""
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
273
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowercase_ = logging.get_logger(__name__) class a_ ( snake_case_ , snake_case_ ): '''simple docstring''' UpperCamelCase = '''maskformer-swin''' UpperCamelCase = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , A=224 , A=4 , A=3 , A=96 , A=[2, 2, 6, 2] , A=[3, 6, 12, 24] , A=7 , A=4.0 , A=True , A=0.0 , A=0.0 , A=0.1 , A="gelu" , A=False , A=0.02 , A=1e-5 , A=None , A=None , **A , ) -> Any: super().__init__(**A ) _SCREAMING_SNAKE_CASE = image_size _SCREAMING_SNAKE_CASE = patch_size _SCREAMING_SNAKE_CASE = num_channels _SCREAMING_SNAKE_CASE = embed_dim _SCREAMING_SNAKE_CASE = depths _SCREAMING_SNAKE_CASE = len(A ) _SCREAMING_SNAKE_CASE = num_heads _SCREAMING_SNAKE_CASE = window_size _SCREAMING_SNAKE_CASE = mlp_ratio _SCREAMING_SNAKE_CASE = qkv_bias _SCREAMING_SNAKE_CASE = hidden_dropout_prob _SCREAMING_SNAKE_CASE = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE = drop_path_rate _SCREAMING_SNAKE_CASE = hidden_act _SCREAMING_SNAKE_CASE = use_absolute_embeddings _SCREAMING_SNAKE_CASE = layer_norm_eps _SCREAMING_SNAKE_CASE = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _SCREAMING_SNAKE_CASE = int(embed_dim * 2 ** (len(A ) - 1) ) _SCREAMING_SNAKE_CASE = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(A ) + 1 )] _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices( out_features=A , out_indices=A , stage_names=self.stage_names )
58
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int: '''simple docstring''' if exponent == 1: return base if exponent % 2 == 0: UpperCAmelCase = _modexpt(UpperCamelCase__ , exponent // 2 , UpperCamelCase__ ) % modulo_value return (x * x) % modulo_value else: return (base * _modexpt(UpperCamelCase__ , exponent - 1 , UpperCamelCase__ )) % modulo_value def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ = 1777 , UpperCamelCase__ = 1855 , UpperCamelCase__ = 8 ) -> int: '''simple docstring''' UpperCAmelCase = base for _ in range(1 , UpperCamelCase__ ): UpperCAmelCase = _modexpt(UpperCamelCase__ , UpperCamelCase__ , 10**digits ) return result if __name__ == "__main__": print(F'{solution() = }')
273
0
import os from pathlib import Path def UpperCamelCase ( ): from torch.utils.cpp_extension import load snake_case : str = Path(__lowerCamelCase ).resolve().parent.parent.parent / "kernels" / "deformable_detr" snake_case : int = [ root / filename for filename in [ "vision.cpp", os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ), os.path.join("cuda" , "ms_deform_attn_cuda.cu" ), ] ] load( "MultiScaleDeformableAttention" , __lowerCamelCase , with_cuda=__lowerCamelCase , extra_include_paths=[str(__lowerCamelCase )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[ "-DCUDA_HAS_FP16=1", "-D__CUDA_NO_HALF_OPERATORS__", "-D__CUDA_NO_HALF_CONVERSIONS__", "-D__CUDA_NO_HALF2_OPERATORS__", ] , ) import MultiScaleDeformableAttention as MSDA return MSDA
59
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging


if TYPE_CHECKING:
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase


logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}


class LongformerConfig(PretrainedConfig):
    """
    Configuration for a Longformer model.

    Stores vocabulary/embedding sizes, layer counts, dropout rates and the
    sliding ``attention_window`` size (an int, or one int per layer).
    """

    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        """Build the config; ``pad_token_id`` and remaining kwargs go to the base class."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for Longformer (adds the global attention mask input)."""

    def __init__(
        self,
        config: "PretrainedConfig",
        task: str = "default",
        patching_specs: "Optional[List[PatchingSpec]]" = None,
    ):
        super().__init__(config, task, patching_specs)
        # Signal to the model that it is being exported to ONNX.
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the model inputs, including the global attention mask."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Base outputs, plus a batch-dynamic pooler output for the default task."""
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Extend the base dummy inputs with a global attention mask (every 2nd token global)."""
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor,
            batch_size=batch_size,
            seq_length=seq_length,
            is_pair=is_pair,
            framework=framework,
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
273
0
"""Lazy import structure for the CTRL model family."""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Config and tokenizer are always importable; modeling modules are added
# below only when their framework backend is installed.
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
60
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class KarrasVePipeline(DiffusionPipeline):
    """
    Unconditional image generation with the stochastic sampler of Karras et
    al. (2022); equation references ("eq. (213) in [1]") are to that paper.

    Attributes:
        unet: U-Net predicting the noise residual.
        scheduler: Karras-VE scheduler providing the sigma schedule.
    """

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        num_inference_steps=50,
        generator=None,
        output_type="pil",
        return_dict=True,
        **kwargs,
    ):
        """Run the sampling loop and return generated images (PIL or numpy)."""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        # Map from [-1, 1] model range to [0, 1] image range.
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
273
0