code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) lowercase__ :Optional[int] = logging.getLogger() def UpperCamelCase ( ): '''simple docstring''' lowercase = argparse.ArgumentParser() parser.add_argument('''-f''' ) lowercase = parser.parse_args() return args.f def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' lowercase = {} lowercase = os.path.join(lowerCAmelCase__ , '''all_results.json''' ) if os.path.exists(lowerCAmelCase__ ): with open(lowerCAmelCase__ , '''r''' ) as f: lowercase = json.load(lowerCAmelCase__ ) else: raise ValueError(f'can\'t find {path}' ) return results def UpperCamelCase ( ): '''simple docstring''' lowercase = torch.cuda.is_available() and torch_device == '''cuda''' return is_using_cuda and is_apex_available() lowercase__ :int = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class lowercase ( SCREAMING_SNAKE_CASE__ ): @classmethod def A__ ( cls): # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU lowercase = tempfile.mkdtemp() lowercase = os.path.join(cls.tmpdir ,'''default_config.yml''') write_basic_config(save_location=cls.configPath) lowercase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def A__ ( cls): shutil.rmtree(cls.tmpdir) @mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''}) def A__ ( self): lowercase = self.get_auto_remove_tmp_dir() lowercase = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file 
./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''') run_command(self._launch_args + testargs) lowercase = get_results(A__) self.assertGreaterEqual(result['''eval_accuracy'''] ,0.75) self.assertTrue(os.path.exists(os.path.join(A__ ,'''epoch_0'''))) self.assertTrue(os.path.exists(os.path.join(A__ ,'''glue_no_trainer'''))) @mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''}) def A__ ( self): lowercase = self.get_auto_remove_tmp_dir() lowercase = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs) lowercase = get_results(A__) self.assertLess(result['''perplexity'''] ,1_0_0) self.assertTrue(os.path.exists(os.path.join(A__ ,'''epoch_0'''))) self.assertTrue(os.path.exists(os.path.join(A__ ,'''clm_no_trainer'''))) @mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''}) def A__ ( self): lowercase = self.get_auto_remove_tmp_dir() lowercase = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs) lowercase = get_results(A__) self.assertLess(result['''perplexity'''] ,4_2) self.assertTrue(os.path.exists(os.path.join(A__ ,'''epoch_0'''))) self.assertTrue(os.path.exists(os.path.join(A__ ,'''mlm_no_trainer'''))) @mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''}) def A__ ( self): # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu lowercase = 7 if get_gpu_count() > 1 else 2 lowercase = self.get_auto_remove_tmp_dir() lowercase = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs) lowercase = get_results(A__) self.assertGreaterEqual(result['''eval_accuracy'''] ,0.75) self.assertLess(result['''train_loss'''] ,0.5) self.assertTrue(os.path.exists(os.path.join(A__ ,'''epoch_0'''))) 
self.assertTrue(os.path.exists(os.path.join(A__ ,'''ner_no_trainer'''))) @unittest.skip(reason='''Fix me @muellerzr''') @mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''}) def A__ ( self): lowercase = self.get_auto_remove_tmp_dir() lowercase = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs) lowercase = get_results(A__) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. self.assertGreaterEqual(result['''eval_f1'''] ,2_8) self.assertGreaterEqual(result['''eval_exact'''] ,2_8) self.assertTrue(os.path.exists(os.path.join(A__ ,'''epoch_0'''))) self.assertTrue(os.path.exists(os.path.join(A__ ,'''qa_no_trainer'''))) @mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''}) def A__ ( self): lowercase = self.get_auto_remove_tmp_dir() lowercase = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split() run_command(self._launch_args + testargs) lowercase = get_results(A__) self.assertGreaterEqual(result['''eval_accuracy'''] ,0.8) self.assertTrue(os.path.exists(os.path.join(A__ ,'''swag_no_trainer'''))) @slow @mock.patch.dict(os.environ ,{'''WANDB_MODE''': 
'''offline'''}) def A__ ( self): lowercase = self.get_auto_remove_tmp_dir() lowercase = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs) lowercase = get_results(A__) self.assertGreaterEqual(result['''eval_rouge1'''] ,1_0) self.assertGreaterEqual(result['''eval_rouge2'''] ,2) self.assertGreaterEqual(result['''eval_rougeL'''] ,7) self.assertGreaterEqual(result['''eval_rougeLsum'''] ,7) self.assertTrue(os.path.exists(os.path.join(A__ ,'''epoch_0'''))) self.assertTrue(os.path.exists(os.path.join(A__ ,'''summarization_no_trainer'''))) @slow @mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''}) def A__ ( self): lowercase = self.get_auto_remove_tmp_dir() lowercase = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs) lowercase = get_results(A__) self.assertGreaterEqual(result['''eval_bleu'''] ,3_0) self.assertTrue(os.path.exists(os.path.join(A__ ,'''epoch_0'''))) self.assertTrue(os.path.exists(os.path.join(A__ ,'''translation_no_trainer'''))) @slow def A__ ( 
self): lowercase = logging.StreamHandler(sys.stdout) logger.addHandler(A__) lowercase = self.get_auto_remove_tmp_dir() lowercase = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split() run_command(self._launch_args + testargs) lowercase = get_results(A__) self.assertGreaterEqual(result['''eval_overall_accuracy'''] ,0.10) @mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''}) def A__ ( self): lowercase = self.get_auto_remove_tmp_dir() lowercase = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''') run_command(self._launch_args + testargs) lowercase = get_results(A__) # The base model scores a 25% self.assertGreaterEqual(result['''eval_accuracy'''] ,0.6) self.assertTrue(os.path.exists(os.path.join(A__ ,'''step_1'''))) self.assertTrue(os.path.exists(os.path.join(A__ ,'''image_classification_no_trainer''')))
101
'''simple docstring''' from datetime import datetime import matplotlib.pyplot as plt import torch def a ( __a ) -> int: '''simple docstring''' for param in module.parameters(): UpperCamelCase__ :Dict = False def a ( ) -> Union[str, Any]: '''simple docstring''' UpperCamelCase__ :List[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu''' if torch.backends.mps.is_available() and torch.backends.mps.is_built(): UpperCamelCase__ :Optional[int] = '''mps''' if device == "mps": print( '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch''' ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues''' ''' with generations.''' ) return device def a ( __a ) -> Any: '''simple docstring''' UpperCamelCase__ :Dict = plt.imshow(__a ) fig.axes.get_xaxis().set_visible(__a ) fig.axes.get_yaxis().set_visible(__a ) plt.show() def a ( ) -> str: '''simple docstring''' UpperCamelCase__ :int = datetime.now() UpperCamelCase__ :str = current_time.strftime('''%H:%M:%S''' ) return timestamp
97
0
"""simple docstring""" def lowercase ( _snake_case : str , _snake_case : str ) ->int: """simple docstring""" if len(_snake_case ) != len(_snake_case ): raise ValueError('''String lengths must match!''' ) __snake_case : List[Any] = 0 for chara, chara in zip(_snake_case , _snake_case ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
102
'''simple docstring''' from scipy.stats import pearsonr import datasets __snake_case = ''' Pearson correlation coefficient and p-value for testing non-correlation. The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. ''' __snake_case = ''' Args: predictions (`list` of `int`): Predicted class labels, as returned by a model. references (`list` of `int`): Ground truth labels. return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`. Returns: pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation. p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities. Examples: Example 1-A simple example using only predictions and references. 
>>> pearsonr_metric = datasets.load_metric("pearsonr") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5]) >>> print(round(results[\'pearsonr\'], 2)) -0.74 Example 2-The same as Example 1, but that also returns the `p-value`. >>> pearsonr_metric = datasets.load_metric("pearsonr") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True) >>> print(sorted(list(results.keys()))) [\'p-value\', \'pearsonr\'] >>> print(round(results[\'pearsonr\'], 2)) -0.74 >>> print(round(results[\'p-value\'], 2)) 0.15 ''' __snake_case = ''' @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, Ilhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Antonio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase ( datasets.Metric ): """simple docstring""" def lowerCAmelCase__ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ): '''simple docstring''' if return_pvalue: UpperCamelCase__ :Any = pearsonr(UpperCamelCase_ , UpperCamelCase_ ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(UpperCamelCase_ , UpperCamelCase_ )[0] )}
97
0
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer A__ : str = logging.get_logger(__name__) A__ : Any = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} A__ : str = { '''vocab_file''': { '''google/realm-cc-news-pretrained-embedder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt''' ), '''google/realm-cc-news-pretrained-encoder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt''' ), '''google/realm-cc-news-pretrained-scorer''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt''' ), '''google/realm-cc-news-pretrained-openqa''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt''' ), '''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''', '''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''', '''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''', '''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''', }, '''tokenizer_file''': { '''google/realm-cc-news-pretrained-embedder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont''' ), '''google/realm-cc-news-pretrained-encoder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json''' ), '''google/realm-cc-news-pretrained-scorer''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json''' ), 
'''google/realm-cc-news-pretrained-openqa''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json''' ), '''google/realm-orqa-nq-openqa''': ( '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json''' ), '''google/realm-orqa-nq-reader''': ( '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json''' ), '''google/realm-orqa-wq-openqa''': ( '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json''' ), '''google/realm-orqa-wq-reader''': ( '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json''' ), }, } A__ : Union[str, Any] = { '''google/realm-cc-news-pretrained-embedder''': 512, '''google/realm-cc-news-pretrained-encoder''': 512, '''google/realm-cc-news-pretrained-scorer''': 512, '''google/realm-cc-news-pretrained-openqa''': 512, '''google/realm-orqa-nq-openqa''': 512, '''google/realm-orqa-nq-reader''': 512, '''google/realm-orqa-wq-openqa''': 512, '''google/realm-orqa-wq-reader''': 512, } A__ : Dict = { '''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True}, '''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True}, '''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True}, '''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True}, '''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True}, '''google/realm-orqa-nq-reader''': {'''do_lower_case''': True}, '''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True}, '''google/realm-orqa-wq-reader''': {'''do_lower_case''': True}, } class __snake_case ( UpperCamelCase_ ): _a = VOCAB_FILES_NAMES _a = PRETRAINED_VOCAB_FILES_MAP _a = PRETRAINED_INIT_CONFIGURATION _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a = RealmTokenizer def __init__( self : int , A_ : Optional[int]=None , A_ : Optional[Any]=None , A_ : Optional[Any]=True , A_ : Optional[int]="[UNK]" , A_ : List[Any]="[SEP]" , A_ : List[Any]="[PAD]" 
, A_ : Optional[Any]="[CLS]" , A_ : Dict="[MASK]" , A_ : List[Any]=True , A_ : List[str]=None , **A_ : List[str] , ): super().__init__( A_ , tokenizer_file=A_ , do_lower_case=A_ , unk_token=A_ , sep_token=A_ , pad_token=A_ , cls_token=A_ , mask_token=A_ , tokenize_chinese_chars=A_ , strip_accents=A_ , **A_ , ) lowerCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get('''lowercase''' , A_) != do_lower_case or normalizer_state.get('''strip_accents''' , A_) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , A_) != tokenize_chinese_chars ): lowerCAmelCase_ : int = getattr(A_ , normalizer_state.pop('''type''')) lowerCAmelCase_ : str = do_lower_case lowerCAmelCase_ : Dict = strip_accents lowerCAmelCase_ : Optional[Any] = tokenize_chinese_chars lowerCAmelCase_ : Union[str, Any] = normalizer_class(**A_) lowerCAmelCase_ : Any = do_lower_case def UpperCAmelCase__ ( self : Optional[Any] , A_ : Optional[Any] , **A_ : Tuple): lowerCAmelCase_ : List[str] = PaddingStrategy.MAX_LENGTH lowerCAmelCase_ : str = text lowerCAmelCase_ : int = kwargs.pop('''text_pair''' , A_) lowerCAmelCase_ : str = kwargs.pop('''return_tensors''' , A_) lowerCAmelCase_ : int = { '''input_ids''': [], '''attention_mask''': [], '''token_type_ids''': [], } for idx, candidate_text in enumerate(A_): if batch_text_pair is not None: lowerCAmelCase_ : List[Any] = batch_text_pair[idx] else: lowerCAmelCase_ : List[Any] = None lowerCAmelCase_ : int = super().__call__(A_ , A_ , return_tensors=A_ , **A_) lowerCAmelCase_ : Optional[Any] = encoded_candidates.get('''input_ids''') lowerCAmelCase_ : List[str] = encoded_candidates.get('''attention_mask''') lowerCAmelCase_ : Optional[Any] = encoded_candidates.get('''token_type_ids''') if encoded_input_ids is not None: output_data["input_ids"].append(A_) if encoded_attention_mask is not None: output_data["attention_mask"].append(A_) if encoded_token_type_ids is not None: 
output_data["token_type_ids"].append(A_) lowerCAmelCase_ : List[str] = {key: item for key, item in output_data.items() if len(A_) != 0} return BatchEncoding(A_ , tensor_type=A_) def UpperCAmelCase__ ( self : List[str] , A_ : Tuple , A_ : List[Any]=None): lowerCAmelCase_ : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCAmelCase__ ( self : Tuple , A_ : List[int] , A_ : Optional[List[int]] = None): lowerCAmelCase_ : Tuple = [self.sep_token_id] lowerCAmelCase_ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def UpperCAmelCase__ ( self : List[str] , A_ : str , A_ : Optional[str] = None): lowerCAmelCase_ : List[str] = self._tokenizer.model.save(A_ , name=A_) return tuple(A_)
103
'''simple docstring''' from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer __snake_case = logging.get_logger(__name__) __snake_case = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } __snake_case = { '''vocab_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json''' }, '''merges_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt''' }, '''tokenizer_config_file''': { '''facebook/blenderbot_small-90M''': ( '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json''' ) }, } __snake_case = { '''facebook/blenderbot_small-90M''': 512, } class lowercase ( A__ ): """simple docstring""" _a = VOCAB_FILES_NAMES _a = PRETRAINED_VOCAB_FILES_MAP _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a = BlenderbotSmallTokenizer def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , UpperCamelCase_=True , **UpperCamelCase_ , ): '''simple docstring''' super().__init__( ByteLevelBPETokenizer( vocab=UpperCamelCase_ , merges=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ , ) , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , **UpperCamelCase_ , ) UpperCamelCase__ :Union[str, Any] = add_prefix_space def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None ): '''simple docstring''' UpperCamelCase__ :List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + 
[self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ): '''simple docstring''' UpperCamelCase__ :Optional[int] = [self.sep_token_id] UpperCamelCase__ :Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
97
0
'''simple docstring''' import argparse import os import re lowerCAmelCase__ = '''src/transformers''' # Pattern that looks at the indentation in a line. lowerCAmelCase__ = re.compile(R'''^(\s*)\S''') # Pattern that matches `"key":" and puts `key` in group 0. lowerCAmelCase__ = re.compile(R'''^\s*"([^"]+)":''') # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. lowerCAmelCase__ = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''') # Pattern that matches `"key",` and puts `key` in group 0. lowerCAmelCase__ = re.compile(R'''^\s*"([^"]+)",\s*$''') # Pattern that matches any `[stuff]` and puts `stuff` in group 0. lowerCAmelCase__ = re.compile(R'''\[([^\]]+)\]''') def _A ( A__ ): """simple docstring""" __lowercase = _re_indent.search(A__ ) return "" if search is None else search.groups()[0] def _A ( A__ , A__="" , A__=None , A__=None ): """simple docstring""" __lowercase = 0 __lowercase = code.split('''\n''' ) if start_prompt is not None: while not lines[index].startswith(A__ ): index += 1 __lowercase = ['''\n'''.join(lines[:index] )] else: __lowercase = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). __lowercase = [lines[index]] index += 1 while index < len(A__ ) and (end_prompt is None or not lines[index].startswith(A__ )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(A__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ): current_block.append(lines[index] ) blocks.append('''\n'''.join(A__ ) ) if index < len(A__ ) - 1: __lowercase = [lines[index + 1]] index += 1 else: __lowercase = [] else: blocks.append('''\n'''.join(A__ ) ) __lowercase = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(A__ ) > 0: blocks.append('''\n'''.join(A__ ) ) # Add final block after end_prompt if provided. 
if end_prompt is not None and index < len(A__ ): blocks.append('''\n'''.join(lines[index:] ) ) return blocks def _A ( A__ ): """simple docstring""" def _inner(A__ ): return key(A__ ).lower().replace('''_''' , '''''' ) return _inner def _A ( A__ , A__=None ): """simple docstring""" def noop(A__ ): return x if key is None: __lowercase = noop # Constants are all uppercase, they go first. __lowercase = [obj for obj in objects if key(A__ ).isupper()] # Classes are not all uppercase but start with a capital, they go second. __lowercase = [obj for obj in objects if key(A__ )[0].isupper() and not key(A__ ).isupper()] # Functions begin with a lowercase, they go last. __lowercase = [obj for obj in objects if not key(A__ )[0].isupper()] __lowercase = ignore_underscore(A__ ) return sorted(A__ , key=A__ ) + sorted(A__ , key=A__ ) + sorted(A__ , key=A__ ) def _A ( A__ ): """simple docstring""" def _replace(A__ ): __lowercase = match.groups()[0] if "," not in imports: return F"[{imports}]" __lowercase = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: __lowercase = keys[:-1] return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(A__ )] ) + "]" __lowercase = import_statement.split('''\n''' ) if len(A__ ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. __lowercase = 2 if lines[1].strip() == '''[''' else 1 __lowercase = [(i, _re_strip_line.search(A__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] __lowercase = sort_objects(A__ , key=lambda A__ : x[1] ) __lowercase = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(A__ ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... 
# ] if _re_bracket_content.search(lines[1] ) is not None: __lowercase = _re_bracket_content.sub(_replace , lines[1] ) else: __lowercase = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: __lowercase = keys[:-1] __lowercase = get_indent(lines[1] ) + ''', '''.join([F"\"{k}\"" for k in sort_objects(A__ )] ) return "\n".join(A__ ) else: # Finally we have to deal with imports fitting on one line __lowercase = _re_bracket_content.sub(_replace , A__ ) return import_statement def _A ( A__ , A__=True ): """simple docstring""" with open(A__ , encoding='''utf-8''' ) as f: __lowercase = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 __lowercase = split_code_in_indented_blocks( A__ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' ) # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(A__ ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. __lowercase = main_blocks[block_idx] __lowercase = block.split('''\n''' ) # Get to the start of the imports. __lowercase = 0 while line_idx < len(A__ ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: __lowercase = len(A__ ) else: line_idx += 1 if line_idx >= len(A__ ): continue # Ignore beginning and last line: they don't contain anything. __lowercase = '''\n'''.join(block_lines[line_idx:-1] ) __lowercase = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. 
__lowercase = split_code_in_indented_blocks(A__ , indent_level=A__ ) # We have two categories of import key: list or _import_structure[key].append/extend __lowercase = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. __lowercase = [(pattern.search(A__ ).groups()[0] if pattern.search(A__ ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. __lowercase = [(i, key) for i, key in enumerate(A__ ) if key is not None] __lowercase = [x[0] for x in sorted(A__ , key=lambda A__ : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. __lowercase = 0 __lowercase = [] for i in range(len(A__ ) ): if keys[i] is None: reorderded_blocks.append(internal_blocks[i] ) else: __lowercase = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reorderded_blocks.append(A__ ) count += 1 # And we put our main block back together with its first and last line. __lowercase = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] ) if code != "\n".join(A__ ): if check_only: return True else: print(F"Overwriting {file}." ) with open(A__ , '''w''' , encoding='''utf-8''' ) as f: f.write('''\n'''.join(A__ ) ) def _A ( A__=True ): """simple docstring""" __lowercase = [] for root, _, files in os.walk(A__ ): if "__init__.py" in files: __lowercase = sort_imports(os.path.join(A__ , '''__init__.py''' ) , check_only=A__ ) if result: __lowercase = [os.path.join(A__ , '''__init__.py''' )] if len(A__ ) > 0: raise ValueError(F"Would overwrite {len(A__ )} files, run `make style`." ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''') lowerCAmelCase__ = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
104
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
97
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) a : str = { '''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''], '''tokenization_deberta''': ['''DebertaTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Tuple = ['''DebertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[int] = [ '''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DebertaForMaskedLM''', '''DebertaForQuestionAnswering''', '''DebertaForSequenceClassification''', '''DebertaForTokenClassification''', '''DebertaModel''', '''DebertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Dict = [ '''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDebertaForMaskedLM''', '''TFDebertaForQuestionAnswering''', '''TFDebertaForSequenceClassification''', '''TFDebertaForTokenClassification''', '''TFDebertaModel''', '''TFDebertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, 
DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys a : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
105
'''simple docstring''' from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class lowercase ( A__ ): """simple docstring""" def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ): '''simple docstring''' super().__init__( features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , ) UpperCamelCase__ :Any = Generator( cache_dir=UpperCamelCase_ , features=UpperCamelCase_ , generator=UpperCamelCase_ , gen_kwargs=UpperCamelCase_ , **UpperCamelCase_ , ) def lowerCAmelCase__ ( self ): '''simple docstring''' if self.streaming: UpperCamelCase__ :Optional[Any] = self.builder.as_streaming_dataset(split='''train''' ) # Build regular (map-style) dataset else: UpperCamelCase__ :Optional[int] = None UpperCamelCase__ :int = None UpperCamelCase__ :Any = None UpperCamelCase__ :Any = None self.builder.download_and_prepare( download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , ) UpperCamelCase__ :List[Any] = self.builder.as_dataset( split='''train''' , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory ) return dataset
97
0
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( A_ = 10_00 ): return sum(e for e in range(3 , A_ ) if e % 3 == 0 or e % 5 == 0 ) if __name__ == "__main__": print(F'''{solution() = }''')
106
'''simple docstring''' __snake_case = 65521 def a ( __a ) -> int: '''simple docstring''' UpperCamelCase__ :Tuple = 1 UpperCamelCase__ :Any = 0 for plain_chr in plain_text: UpperCamelCase__ :List[str] = (a + ord(__a )) % MOD_ADLER UpperCamelCase__ :Tuple = (b + a) % MOD_ADLER return (b << 16) | a
97
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowerCAmelCase : Any = { 'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'], 'configuration_data2vec_text': [ 'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecTextConfig', 'Data2VecTextOnnxConfig', ], 'configuration_data2vec_vision': [ 'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecVisionConfig', 'Data2VecVisionOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : str = [ 'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecAudioForAudioFrameClassification', 'Data2VecAudioForCTC', 'Data2VecAudioForSequenceClassification', 'Data2VecAudioForXVector', 'Data2VecAudioModel', 'Data2VecAudioPreTrainedModel', ] __lowerCAmelCase : Dict = [ 'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecTextForCausalLM', 'Data2VecTextForMaskedLM', 'Data2VecTextForMultipleChoice', 'Data2VecTextForQuestionAnswering', 'Data2VecTextForSequenceClassification', 'Data2VecTextForTokenClassification', 'Data2VecTextModel', 'Data2VecTextPreTrainedModel', ] __lowerCAmelCase : Dict = [ 'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecVisionForImageClassification', 'Data2VecVisionForMaskedImageModeling', 'Data2VecVisionForSemanticSegmentation', 'Data2VecVisionModel', 'Data2VecVisionPreTrainedModel', ] if is_tf_available(): __lowerCAmelCase : Optional[Any] = [ 'TFData2VecVisionForImageClassification', 'TFData2VecVisionForSemanticSegmentation', 'TFData2VecVisionModel', 'TFData2VecVisionPreTrainedModel', ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from 
.configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys __lowerCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
107
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''', '''umberto-commoncrawl-cased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json''' ), '''umberto-wikipedia-uncased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json''' ), } class lowercase ( A__ ): """simple docstring""" _a = 'camembert' def __init__( self , UpperCamelCase_=30522 , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3072 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=2 , UpperCamelCase_=0.02 , UpperCamelCase_=1e-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ): '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) UpperCamelCase__ :int = vocab_size UpperCamelCase__ :Optional[int] = hidden_size UpperCamelCase__ :Optional[int] = num_hidden_layers UpperCamelCase__ :List[Any] = num_attention_heads UpperCamelCase__ :Union[str, Any] = hidden_act UpperCamelCase__ :List[Any] = intermediate_size UpperCamelCase__ :int = hidden_dropout_prob UpperCamelCase__ :Tuple = attention_probs_dropout_prob UpperCamelCase__ :Union[str, Any] = max_position_embeddings UpperCamelCase__ :Tuple = type_vocab_size UpperCamelCase__ :int = initializer_range UpperCamelCase__ :List[str] = layer_norm_eps UpperCamelCase__ :int = position_embedding_type UpperCamelCase__ :Any = use_cache UpperCamelCase__ :Any = classifier_dropout class lowercase ( 
A__ ): """simple docstring""" @property def lowerCAmelCase__ ( self ): '''simple docstring''' if self.task == "multiple-choice": UpperCamelCase__ :List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: UpperCamelCase__ :Tuple = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
97
0
"""simple docstring""" import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=2 , snake_case__=99 , snake_case__=0 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=2 , snake_case__=4 , snake_case__="last" , snake_case__=True , snake_case__=None , snake_case__=0 , ): """simple docstring""" lowerCAmelCase : Tuple = parent lowerCAmelCase : str = batch_size lowerCAmelCase : int = seq_length lowerCAmelCase : Union[str, Any] = is_training lowerCAmelCase : List[Any] = use_input_lengths lowerCAmelCase : Tuple = use_token_type_ids lowerCAmelCase : Optional[Any] = use_labels lowerCAmelCase : str = gelu_activation lowerCAmelCase : List[Any] = sinusoidal_embeddings lowerCAmelCase : Any = causal lowerCAmelCase : List[str] = asm lowerCAmelCase : str = n_langs lowerCAmelCase : Union[str, Any] = vocab_size lowerCAmelCase : Optional[int] = n_special lowerCAmelCase : str = hidden_size lowerCAmelCase : Any = num_hidden_layers 
lowerCAmelCase : Dict = num_attention_heads lowerCAmelCase : List[Any] = hidden_dropout_prob lowerCAmelCase : List[str] = attention_probs_dropout_prob lowerCAmelCase : List[str] = max_position_embeddings lowerCAmelCase : Optional[int] = type_sequence_label_size lowerCAmelCase : List[str] = initializer_range lowerCAmelCase : List[str] = num_labels lowerCAmelCase : Union[str, Any] = num_choices lowerCAmelCase : Any = summary_type lowerCAmelCase : Tuple = use_proj lowerCAmelCase : int = scope lowerCAmelCase : Union[str, Any] = bos_token_id def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase : int = None if self.use_input_lengths: lowerCAmelCase : Any = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowerCAmelCase : Union[str, Any] = None if self.use_token_type_ids: lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowerCAmelCase : Union[str, Any] = None lowerCAmelCase : List[str] = None lowerCAmelCase : List[Any] = None if self.use_labels: lowerCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , 2 ).float() lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase : Union[str, Any] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def lowercase__ ( self ): """simple docstring""" return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , 
dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): """simple docstring""" lowerCAmelCase : Union[str, Any] = XLMModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowerCAmelCase : str = model(snake_case__ , lengths=snake_case__ , langs=snake_case__ ) lowerCAmelCase : str = model(snake_case__ , langs=snake_case__ ) lowerCAmelCase : List[Any] = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): """simple docstring""" lowerCAmelCase : List[str] = XLMWithLMHeadModel(snake_case__ ) model.to(snake_case__ ) model.eval() lowerCAmelCase : Any = model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): """simple docstring""" lowerCAmelCase : Union[str, Any] = XLMForQuestionAnsweringSimple(snake_case__ ) model.to(snake_case__ ) model.eval() lowerCAmelCase : Tuple = model(snake_case__ ) lowerCAmelCase : List[str] = model(snake_case__ , 
start_positions=snake_case__ , end_positions=snake_case__ ) lowerCAmelCase : List[str] = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): """simple docstring""" lowerCAmelCase : Dict = XLMForQuestionAnswering(snake_case__ ) model.to(snake_case__ ) model.eval() lowerCAmelCase : Union[str, Any] = model(snake_case__ ) lowerCAmelCase : int = model( snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , ) lowerCAmelCase : List[Any] = model( snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , ) ((lowerCAmelCase) , ) : str = result_with_labels.to_tuple() lowerCAmelCase : int = model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ ) ((lowerCAmelCase) , ) : Optional[int] = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): """simple docstring""" lowerCAmelCase : 
Optional[int] = XLMForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowerCAmelCase : Any = model(snake_case__ ) lowerCAmelCase : Any = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): """simple docstring""" lowerCAmelCase : List[Any] = self.num_labels lowerCAmelCase : str = XLMForTokenClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowerCAmelCase : Optional[Any] = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): """simple docstring""" lowerCAmelCase : Tuple = self.num_choices lowerCAmelCase : List[Any] = XLMForMultipleChoice(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowerCAmelCase : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase : int = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : List[str] = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( 
lowerCAmelCase ) , ( lowerCAmelCase ) , ) : List[Any] = config_and_inputs lowerCAmelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , lowercase , unittest.TestCase ): """simple docstring""" a : int =( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) a : str =( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable a : Union[str, Any] =( { "feature-extraction": XLMModel, "fill-mask": XLMWithLMHeadModel, "question-answering": XLMForQuestionAnsweringSimple, "text-classification": XLMForSequenceClassification, "text-generation": XLMWithLMHeadModel, "token-classification": XLMForTokenClassification, "zero-shot": XLMForSequenceClassification, } if is_torch_available() else {} ) def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__=False ): """simple docstring""" lowerCAmelCase : List[Any] = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": lowerCAmelCase : str = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) lowerCAmelCase : Any = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) return inputs_dict def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : List[Any] = XLMModelTester(self ) lowerCAmelCase : Dict = ConfigTester(self , config_class=snake_case__ , emb_dim=37 ) def lowercase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*snake_case__ ) def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*snake_case__ ) def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*snake_case__ ) def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*snake_case__ ) def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*snake_case__ ) def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : 
Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*snake_case__ ) def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*snake_case__ ) def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=False , snake_case__=1 ): """simple docstring""" self.assertIsInstance(snake_case__ , snake_case__ ) self.assertListEqual( [isinstance(snake_case__ , snake_case__ ) for iter_attentions in attentions] , [True] * len(snake_case__ ) ) self.assertEqual(len(snake_case__ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(snake_case__ ): # adds PAD dummy token lowerCAmelCase : Optional[int] = min_length + idx + 1 lowerCAmelCase : List[str] = min_length + idx + 1 lowerCAmelCase : Dict = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(snake_case__ ) ) def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=False , snake_case__=1 ): """simple docstring""" self.assertIsInstance(snake_case__ , snake_case__ ) self.assertListEqual( [isinstance(snake_case__ , snake_case__ ) for iter_hidden_states in hidden_states] , [True] * len(snake_case__ ) , ) self.assertEqual(len(snake_case__ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(snake_case__ ): # adds PAD dummy token lowerCAmelCase : List[str] = min_length + idx + 1 lowerCAmelCase : Tuple = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(snake_case__ ) , 
) pass @slow def lowercase__ ( self ): """simple docstring""" for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase : Optional[int] = XLMModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" @slow def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Optional[Any] = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" ) model.to(snake_case__ ) lowerCAmelCase : Union[str, Any] = torch.tensor([[14, 447]] , dtype=torch.long , device=snake_case__ ) # the president lowerCAmelCase : Any = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference lowerCAmelCase : Tuple = model.generate(snake_case__ , do_sample=snake_case__ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , snake_case__ )
108
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class lowercase ( unittest.TestCase ): """simple docstring""" def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=3 , UpperCamelCase_=30 , UpperCamelCase_=400 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=True , UpperCamelCase_=1 / 255 , UpperCamelCase_=True , ): '''simple docstring''' UpperCamelCase__ :Dict = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333} UpperCamelCase__ :str = parent UpperCamelCase__ :List[Any] = batch_size UpperCamelCase__ :Dict = num_channels UpperCamelCase__ :str = min_resolution UpperCamelCase__ :Optional[Any] = max_resolution UpperCamelCase__ :int = do_resize UpperCamelCase__ :Optional[Any] = size UpperCamelCase__ :Tuple = do_normalize UpperCamelCase__ :List[Any] = image_mean UpperCamelCase__ :Dict = image_std UpperCamelCase__ :Union[str, Any] = do_rescale UpperCamelCase__ :Union[str, Any] = rescale_factor UpperCamelCase__ :Union[str, Any] = do_pad def lowerCAmelCase__ ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=False ): '''simple docstring''' if not batched: UpperCamelCase__ :List[str] = image_inputs[0] if 
isinstance(UpperCamelCase_ , Image.Image ): UpperCamelCase__ , UpperCamelCase__ :List[str] = image.size else: UpperCamelCase__ , UpperCamelCase__ :List[Any] = image.shape[1], image.shape[2] if w < h: UpperCamelCase__ :int = int(self.size['''shortest_edge'''] * h / w ) UpperCamelCase__ :Dict = self.size['''shortest_edge'''] elif w > h: UpperCamelCase__ :int = self.size['''shortest_edge'''] UpperCamelCase__ :Tuple = int(self.size['''shortest_edge'''] * w / h ) else: UpperCamelCase__ :str = self.size['''shortest_edge'''] UpperCamelCase__ :str = self.size['''shortest_edge'''] else: UpperCamelCase__ :Any = [] for image in image_inputs: UpperCamelCase__ , UpperCamelCase__ :Dict = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCamelCase__ :List[Any] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[0] )[0] UpperCamelCase__ :Optional[int] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowercase ( A__ , unittest.TestCase ): """simple docstring""" _a = ConditionalDetrImageProcessor if is_vision_available() else None def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessingTester(self ) @property def lowerCAmelCase__ ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Optional[int] = 
self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} ) self.assertEqual(image_processor.do_pad , UpperCamelCase_ ) UpperCamelCase__ :List[str] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase_ ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , UpperCamelCase_ ) def lowerCAmelCase__ ( self ): '''simple docstring''' pass def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase__ :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image ) # Test not batched input UpperCamelCase__ :Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ ) UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , 
equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , np.ndarray ) # Test not batched input UpperCamelCase__ :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCamelCase__ , UpperCamelCase__ :List[Any] = self.image_processor_tester.get_expected_values(UpperCamelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase__ :Union[str, Any] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , torch.Tensor ) # Test not batched input UpperCamelCase__ :str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCamelCase__ , UpperCamelCase__ :Dict = self.image_processor_tester.get_expected_values(UpperCamelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ ) self.assertEqual( encoded_images.shape , ( 
self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: UpperCamelCase__ :Optional[int] = json.loads(f.read() ) UpperCamelCase__ :Any = {'''image_id''': 39769, '''annotations''': target} # encode them UpperCamelCase__ :str = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' ) UpperCamelCase__ :List[Any] = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , return_tensors='''pt''' ) # verify pixel values UpperCamelCase__ :List[str] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ ) UpperCamelCase__ :str = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) ) # verify area UpperCamelCase__ :str = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) ) # verify boxes UpperCamelCase__ :Optional[Any] = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ ) UpperCamelCase__ :Optional[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) ) # verify image_id UpperCamelCase__ :List[Any] = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) ) # verify is_crowd UpperCamelCase__ :int = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) ) # verify 
class_labels UpperCamelCase__ :List[str] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) ) # verify orig_size UpperCamelCase__ :Tuple = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) ) # verify size UpperCamelCase__ :Union[str, Any] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) ) @slow def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: UpperCamelCase__ :Tuple = json.loads(f.read() ) UpperCamelCase__ :List[str] = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target} UpperCamelCase__ :Any = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessor(format='''coco_panoptic''' ) UpperCamelCase__ :Dict = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , masks_path=UpperCamelCase_ , return_tensors='''pt''' ) # verify pixel values UpperCamelCase__ :str = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ ) UpperCamelCase__ :Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) ) # verify area UpperCamelCase__ :Tuple = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) ) # verify boxes UpperCamelCase__ :Any = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ ) 
UpperCamelCase__ :List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) ) # verify image_id UpperCamelCase__ :List[str] = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) ) # verify is_crowd UpperCamelCase__ :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) ) # verify class_labels UpperCamelCase__ :str = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) ) # verify masks UpperCamelCase__ :Optional[Any] = 822873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCamelCase_ ) # verify orig_size UpperCamelCase__ :List[str] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) ) # verify size UpperCamelCase__ :List[Any] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) )
97
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging A: Tuple = logging.get_logger(__name__) A: List[Any] = { "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json", # See all Donut models at https://huggingface.co/models?filter=donut-swin } class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): __lowerCAmelCase : Union[str, Any] = 'donut-swin' __lowerCAmelCase : int = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self , _SCREAMING_SNAKE_CASE=224 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=96 , _SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , _SCREAMING_SNAKE_CASE=[3, 6, 12, 24] , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=4.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-5 , **_SCREAMING_SNAKE_CASE , ) -> Any: '''simple docstring''' super().__init__(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Any = image_size UpperCAmelCase : Optional[Any] = patch_size UpperCAmelCase : Any = num_channels UpperCAmelCase : int = embed_dim UpperCAmelCase : Tuple = depths UpperCAmelCase : Any = len(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[int] = num_heads UpperCAmelCase : Optional[int] = window_size UpperCAmelCase : List[str] = mlp_ratio UpperCAmelCase : Any = qkv_bias UpperCAmelCase : int = hidden_dropout_prob UpperCAmelCase : List[str] = attention_probs_dropout_prob UpperCAmelCase : List[str] = drop_path_rate UpperCAmelCase : List[str] = hidden_act UpperCAmelCase : Any = use_absolute_embeddings UpperCAmelCase : List[Any] = layer_norm_eps UpperCAmelCase : Tuple = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of 
the model UpperCAmelCase : Optional[int] = int(embed_dim * 2 ** (len(_SCREAMING_SNAKE_CASE ) - 1) )
109
"""Count the ways to assign N tasks among M persons using bitmask DP."""

from collections import defaultdict


class AssignmentUsingBitmask:
    """Bitmask dynamic programming over person subsets.

    ``dp[mask][task_no]`` caches the number of ways to give every person a
    task when the persons in ``mask`` are already assigned and tasks
    ``task_no..N`` are still available.
    """

    def __init__(self, task_performed, total):
        # total no of tasks (N)
        self.total_tasks = total

        # DP table of dimension (2^M) x (N+1); -1 marks "not computed yet"
        self.dp = [
            [-1 for _ in range(total + 1)] for _ in range(2 ** len(task_performed))
        ]

        # task -> list of persons able to perform it (filled by count_no_of_ways)
        self.task = defaultdict(list)

        # all M low bits set: every person has received a task
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        """Return the number of complete assignments reachable from this state."""
        # everyone already has a task: one valid assignment found
        if mask == self.final_mask:
            return 1
        # no tasks left but some persons are still unassigned
        if task_no > self.total_tasks:
            return 0
        # memoised result
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # ways when the current task is given to nobody
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # try giving the current task to every capable, still-free person
        if task_no in self.task:
            for p in self.task[task_no]:
                # person p already holds a task
                if mask & (1 << p):
                    continue
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        """Populate the task->persons map and count all complete assignments."""
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
97
0
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    """Return the total number of trainable parameters in *model*."""
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Build a ModelCheckpoint that keeps the single best model by val_<metric>."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,  # maybe save a checkpoint every time val is run, not just end of epoch.
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    """Early stopping on val_<metric>; minimise when the metric is a loss."""
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    """Logs learning rates, parameter counts and per-split metrics/generations."""

    def on_batch_end(self, trainer, pl_module):
        # log the per-parameter-group learning rates every batch
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
110
'''simple docstring''' import csv import tweepy # Twitter API credentials __snake_case = '''''' __snake_case = '''''' __snake_case = '''''' __snake_case = '''''' def a ( __a ) -> None: '''simple docstring''' UpperCamelCase__ :List[Any] = tweepy.OAuthHandler(__a , __a ) auth.set_access_token(__a , __a ) UpperCamelCase__ :List[str] = tweepy.API(__a ) # initialize a list to hold all the tweepy Tweets UpperCamelCase__ :Dict = [] # make initial request for most recent tweets (200 is the maximum allowed count) UpperCamelCase__ :Tuple = api.user_timeline(screen_name=__a , count=200 ) # save most recent tweets alltweets.extend(__a ) # save the id of the oldest tweet less one UpperCamelCase__ :Union[str, Any] = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(__a ) > 0: print(f'''getting tweets before {oldest}''' ) # all subsequent requests use the max_id param to prevent duplicates UpperCamelCase__ :Union[str, Any] = api.user_timeline( screen_name=__a , count=200 , max_id=__a ) # save most recent tweets alltweets.extend(__a ) # update the id of the oldest tweet less one UpperCamelCase__ :Tuple = alltweets[-1].id - 1 print(f'''...{len(__a )} tweets downloaded so far''' ) # transform the tweepy tweets into a 2D array that will populate the csv UpperCamelCase__ :int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(f'''new_{screen_name}_tweets.csv''' , '''w''' ) as f: UpperCamelCase__ :Tuple = csv.writer(__a ) writer.writerow(['''id''', '''created_at''', '''text'''] ) writer.writerows(__a ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets('''FirePing32''')
97
0
from __future__ import annotations class __snake_case : def __init__( self : List[str] , A_ : Optional[int]): lowerCAmelCase_ : str = data lowerCAmelCase_ : Node | None = None lowerCAmelCase_ : Node | None = None def UpperCamelCase( __UpperCamelCase : int ): # In Order traversal of the tree if tree: display(tree.left ) print(tree.data ) display(tree.right ) def UpperCamelCase( __UpperCamelCase : List[str] ): return 1 + max(depth_of_tree(tree.left ) ,depth_of_tree(tree.right ) ) if tree else 0 def UpperCamelCase( __UpperCamelCase : str ): if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def UpperCamelCase( ): # Main function for testing. lowerCAmelCase_ : Dict = Node(1 ) lowerCAmelCase_ : Tuple = Node(2 ) lowerCAmelCase_ : Tuple = Node(3 ) lowerCAmelCase_ : Any = Node(4 ) lowerCAmelCase_ : List[Any] = Node(5 ) lowerCAmelCase_ : List[str] = Node(6 ) lowerCAmelCase_ : Union[str, Any] = Node(7 ) lowerCAmelCase_ : List[Any] = Node(8 ) lowerCAmelCase_ : int = Node(9 ) print(is_full_binary_tree(__a ) ) print(depth_of_tree(__a ) ) print('''Tree is: ''' ) display(__a ) if __name__ == "__main__": main()
103
"""Convert Table Transformer checkpoints (PubTables-1M) to the HF format."""

import argparse
from collections import OrderedDict
from pathlib import Path

import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F

from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
        ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
    ]
)


def rename_key(state_dict, old, new):
    """Move *old* to *new* in *state_dict*, in place."""
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    """Return a copy of *state_dict* with backbone keys renamed to the HF scheme."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict):
    """Split every fused in_proj weight/bias into separate q/k/v projections, in place."""
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]

    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def resize(image, checkpoint_url):
    """Resize so the longest side is 800 (detection) or 1000 (structure recognition)."""
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    """Convert a PIL image to a normalized tensor (ImageNet mean/std)."""
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image


@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak model's weights to our DETR structure, verify the outputs
    against hard-coded expected values, and optionally save/push the result.
    """
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
97
0
"""Convert a native T5X (UMT5/MT5) checkpoint into a PyTorch checkpoint.

Fixes over the previous revision: all helper functions were defined under one
name (`_A`) so later definitions shadowed earlier ones while the bodies called
`tax_*_lookup` names that did not exist; several defs had duplicate parameter
names (a SyntaxError); the CLI section assigned the parser to one name but used
another, and read `args.tax_checkpoint_path` although the flag is
`--t5x_checkpoint_path`.
"""

import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def tax_relpos_bias_lookup(params, i, prefix):
    """Return the relative position bias parameters of layer ``i``."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Return (k, o, q, v) projection matrices of attention layer ``i``, flattened to 2D."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Return the (wi, wo) MLP weights of layer ``i``; ``wi`` is a pair for gated-GeLU models."""
    if split_mlp_wi:
        wi_a = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_b = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_a, wi_b)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Return the layer-norm scale of layer ``i``."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Convert the flattened T5X parameter dict into a HF-style parameter mapping."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        # Non-scalable models share a single relative attention bias in block 0.
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only):
    """Turn the converted numpy mapping into a PyTorch state dict, filling tied weights."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:
            # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    """Load a T5X checkpoint from disk into ``model`` (strict key matching)."""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False
):
    """Build the PyTorch model, load the T5X weights into it and save it to disk."""
    config = MTaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
    # Required parameters
    parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
    )
    parser.add_argument(
        '--scalable_attention',
        action='store_true',
        help='Whether the model uses scaled attention (umt5 model)',
        default=False,
    )
    args = parser.parse_args()
    # NOTE: argparse stores the flag under ``t5x_checkpoint_path`` (the previous
    # revision read a non-existent ``tax_checkpoint_path`` attribute).
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
250
'''simple docstring''' from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def a ( __a ) -> bool: '''simple docstring''' UpperCamelCase__ :int = int(number**0.5 ) return number == sq * sq def a ( __a , __a , __a , __a , __a , __a ) -> tuple[int, int]: '''simple docstring''' UpperCamelCase__ :int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den UpperCamelCase__ :int = x_den * y_den * z_den UpperCamelCase__ :int = gcd(__a , __a ) top //= hcf bottom //= hcf return top, bottom def a ( __a = 35 ) -> int: '''simple docstring''' UpperCamelCase__ :set = set() UpperCamelCase__ :int UpperCamelCase__ :Fraction = Fraction(0 ) UpperCamelCase__ :tuple[int, int] for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 UpperCamelCase__ :int = x_num * y_den + x_den * y_num UpperCamelCase__ :Any = x_den * y_den UpperCamelCase__ :Tuple = gcd(__a , __a ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: UpperCamelCase__ :Tuple = add_three( __a , __a , __a , __a , __a , __a ) unique_s.add(__a ) # n=2 UpperCamelCase__ :List[str] = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) UpperCamelCase__ :Dict = x_den * x_den * y_den * y_den if is_sq(__a ) and is_sq(__a ): UpperCamelCase__ :Any = int(sqrt(__a ) ) UpperCamelCase__ :Optional[int] = int(sqrt(__a ) ) UpperCamelCase__ :int = gcd(__a , __a ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: UpperCamelCase__ :Tuple = add_three( __a , __a , __a , __a , __a , __a ) unique_s.add(__a ) # n=-1 UpperCamelCase__ :Tuple = x_num * y_num UpperCamelCase__ :Union[str, Any] = x_den * y_num + x_num * y_den UpperCamelCase__ :List[str] = gcd(__a , __a ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: UpperCamelCase__ :Union[str, Any] = add_three( __a , __a , __a , __a , __a , __a ) unique_s.add(__a ) # n=2 UpperCamelCase__ 
:Optional[Any] = x_num * x_num * y_num * y_num UpperCamelCase__ :Tuple = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(__a ) and is_sq(__a ): UpperCamelCase__ :str = int(sqrt(__a ) ) UpperCamelCase__ :Any = int(sqrt(__a ) ) UpperCamelCase__ :Dict = gcd(__a , __a ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: UpperCamelCase__ :int = add_three( __a , __a , __a , __a , __a , __a ) unique_s.add(__a ) for num, den in unique_s: total += Fraction(__a , __a ) return total.denominator + total.numerator if __name__ == "__main__": print(F"""{solution() = }""")
97
0
"""Tokenization class for XLM-ProphetNet (SentencePiece + fairseq id offsets).

Fixes over the previous revision: every module-level constant was bound to the
single name ``_UpperCamelCase`` (each assignment clobbered the previous one),
the class inherited from an undefined name, every method was named
``lowercase`` (later defs shadowed earlier ones), and method bodies referenced
undefined ``UpperCamelCase_`` identifiers instead of their parameters.
"""

import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'prophetnet.tokenizer'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'microsoft/xprophetnet-large-wiki100-cased': (
            'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'microsoft/xprophetnet-large-wiki100-cased': 512,
}


def load_vocab(vocab_file):
    """Load a vocabulary file into an ordered token -> id dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip('\n')
        vocab[token] = index
    return vocab


class XLMProphetNetTokenizer(PreTrainedTokenizer):
    """SentencePiece tokenizer aligned with the original fairseq vocabulary."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
                ' pip install sentencepiece'
            )
            raise

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4}
        for i in range(10):
            tok = f'[unused{i}]'
            self.fairseq_tokens_to_ids[tok] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
                ' pip install sentencepiece'
            )
            raise

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """XLM-ProphetNet does not use token type ids; return all zeros."""
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id using the vocab, with the fairseq offset."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an id to a token (str) using the vocab, with the fairseq offset."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a sequence of tokens back to a single string."""
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs: ``X [SEP]`` or ``A [SEP] B [SEP]``."""
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep


# Backward-compatible aliases for the previous (generated) names.
lowercase__ = load_vocab
a__ = XLMProphetNetTokenizer
255
"""Project Euler Problem 40: product of selected digits of Champernowne's constant."""


def solution() -> int:
    """Return d1 * d10 * d100 * d1000 * d10000 * d100000 * d1000000 of 0.123456789101112...

    Fixes over the previous revision: the return annotation referenced an
    unimported ``Union`` (NameError at import time) and the ``__main__`` guard
    called an undefined ``solution``; the loop also appended one million
    *numbers* (about 6.9 million digits) because it compared the list length
    instead of the digit count — we now stop once a million digits exist.
    """
    parts = []
    digit_count = 0
    i = 1
    while digit_count < 10**6:
        s = str(i)
        parts.append(s)
        digit_count += len(s)
        i += 1
    constant = "".join(parts)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


# Backward-compatible alias for the previous (generated) name.
a = solution


if __name__ == "__main__":
    print(solution())
97
0
"""Integration tests for ``datasets``' FAISS and Elasticsearch search indexes.

Fixes over the previous revision: all three test classes shared the single
name ``_UpperCAmelCase`` and every test method was named
``SCREAMING_SNAKE_CASE`` — later definitions shadowed earlier ones, so pytest
only ever collected one test per class; lambdas had duplicate parameter names
(a SyntaxError) and several bodies referenced undefined mangled identifiers.
"""

import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import numpy as np
import pytest

from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex

from .utils import require_elasticsearch, require_faiss


pytestmark = pytest.mark.integration


@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self):
        """Build a 30-row dataset with filenames my_name-train_0 ... my_name-train_29."""
        dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index('vecs', batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples('vecs', np.ones(5, dtype=np.float32))
        self.assertEqual(examples['filename'][0], 'my_name-train_29')
        dset.drop_index('vecs')

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name='vecs',
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples('vecs', np.ones(5, dtype=np.float32))
        self.assertEqual(examples['filename'][0], 'my_name-train_29')

    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name='vecs',
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index('vecs', tmp_file.name)
            dset.load_faiss_index('vecs2', tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples('vecs2', np.ones(5, dtype=np.float32))
        self.assertEqual(examples['filename'][0], 'my_name-train_29')

    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name='vecs'
        )
        dset.drop_index('vecs')
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, 'vecs2', np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create'
        ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            mocked_index_create.return_value = {'acknowledged': True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index('filename', es_client=es_client)
            scores, examples = dset.get_nearest_examples('filename', 'my_name-train_29')
            self.assertEqual(examples['filename'][0], 'my_name-train_29')


@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1  # so the nearest identity row is index 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory='Flat')
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory='LSH')
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory='Flat', custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)


@require_faiss
def test_serialization_fs(mockfs):
    """Round-trip a FAISS index through a (mock) fsspec filesystem."""
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = 'index.faiss'
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1


@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create'
        ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(['foo', 'bar', 'foobar'])

            # single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
102
"""Change the brightness of a PIL image by a constant offset."""

from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Return a copy of ``img`` with ``level`` added to every channel value.

    Raises:
        ValueError: if ``level`` is outside [-255.0, 255.0].
    """

    def brightness(c: int) -> float:
        # Bug fix: the previous revision referenced an undefined name instead of
        # the per-channel value ``c``, raising NameError on the first pixel.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)')
    return img.point(brightness)


# Backward-compatible alias for the previous (generated) name; the original
# ``__main__`` guard already called ``change_brightness``.
a = change_brightness


if __name__ == "__main__":
    # Load image
    with Image.open('image_data/lena.jpg') as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save('image_data/lena_brightness.png', format='png')
97
0
"""Configuration class for the Nezha model.

Fixes over the previous revision: the class inherited from the archive-map
dict (a TypeError at class creation) instead of ``PretrainedConfig``, its class
attributes referenced an undefined ``NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP`` and
collided on one name, and ``__init__`` declared every parameter under the same
name (a SyntaxError) while forwarding undefined identifiers to ``super()``.
"""

from ... import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}

# Backward-compatible alias for the previous (generated) module-level name.
A__ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP


class NezhaConfig(PretrainedConfig):
    """Stores the configuration of a Nezha model (BERT-like, with relative positions)."""

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache


# Backward-compatible alias for the previous (generated) class name.
lowercase__ = NezhaConfig
144
"""Close or nag stale GitHub issues on the huggingface/transformers repository.

Fixes over the previous revision: the ``__main__`` guard called an undefined
``main``, the loop referenced an undefined ``LABELS_TO_EXEMPT`` (the list was
bound to a mangled name), and the comment sort key referenced undefined names.
"""

from datetime import datetime as dt
import os

from github import Github


# Issues carrying any of these labels are never marked stale.
LABELS_TO_EXEMPT = [
    'good first issue',
    'good second issue',
    'good difficult issue',
    'feature request',
    'new model',
    'wip',
]


def main():
    """Scan open issues and close or comment on the stale ones."""
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/transformers')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        # Newest comment first.
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state='closed')
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                'This issue has been automatically marked as stale because it has not had '
                'recent activity. If you think this still needs to be addressed '
                'please comment on this thread.\n\nPlease note that issues that do not follow the '
                '[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
                'are likely to be ignored.'
            )


if __name__ == "__main__":
    main()
97
0
"""Count the ways to assign N distinct tasks to M persons with a bitmask DP,
where each person may only perform the tasks they are qualified for.

Fixes over the previous revision: methods declared duplicate parameter names
(a SyntaxError), were all named ``__UpperCAmelCase`` while the ``__main__``
guard called ``count_no_of_ways``, and bodies referenced undefined identifiers
(``total``, ``mask``, ``task_no``) left over from name mangling.
"""

from collections import defaultdict


class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        """``task_performed[p]`` lists the tasks person ``p`` can do; ``total`` is N."""
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [[-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        """Ways to complete the assignment for persons missing from ``mask`` using tasks >= ``task_no``."""
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        """Fill the task -> persons map, then count all complete assignments."""
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
276
"""Sentence-splitting helper for Pegasus-style summarization metrics."""

import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock('.lock') as lock:
        nltk.download('punkt', quiet=True)


def a(x: str) -> str:
    """Strip Pegasus ``<n>`` markers and put every sentence on its own line.

    Raises:
        AssertionError: if nltk is not installed.
    """
    # Bug fix: the cleaned string was previously discarded (re.sub's return
    # value was not kept), so the <n> markers survived into the output.
    x = re.sub('<n>', '', x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
97
0
import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'vocab_file': 'vocab.txt', 'merges_file': 'bpe.codes', } UpperCAmelCase_ = { 'vocab_file': { 'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt', 'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt', }, 'merges_file': { 'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes', 'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes', }, } UpperCAmelCase_ = { 'vinai/phobert-base': 256, 'vinai/phobert-large': 256, } def lowerCamelCase__ ( A__ : List[Any] ): '''simple docstring''' __lowerCamelCase = set() __lowerCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowerCamelCase = char __lowerCamelCase = set(__a ) return pairs class lowerCamelCase__( A__): UpperCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES UpperCAmelCase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self: List[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: Dict , UpperCamelCase_: int="<s>" , UpperCamelCase_: Any="</s>" , UpperCamelCase_: int="</s>" , UpperCamelCase_: Optional[int]="<s>" , UpperCamelCase_: Optional[Any]="<unk>" , UpperCamelCase_: int="<pad>" , UpperCamelCase_: Union[str, Any]="<mask>" , **UpperCamelCase_: Union[str, Any] , ): super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , ) __lowerCamelCase = vocab_file __lowerCamelCase = merges_file __lowerCamelCase = {} __lowerCamelCase = 0 __lowerCamelCase = 1 
__lowerCamelCase = 2 __lowerCamelCase = 3 self.add_from_file(UpperCamelCase_ ) __lowerCamelCase = {v: k for k, v in self.encoder.items()} with open(UpperCamelCase_ , encoding="""utf-8""" ) as merges_handle: __lowerCamelCase = merges_handle.read().split("""\n""" )[:-1] __lowerCamelCase = [tuple(merge.split()[:-1] ) for merge in merges] __lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) __lowerCamelCase = {} def lowerCAmelCase__ ( self: Any , UpperCamelCase_: List[Any] , UpperCamelCase_: List[Any] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowerCamelCase = [self.cls_token_id] __lowerCamelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase__ ( self: int , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Union[str, Any] = None , UpperCamelCase_: Optional[Any] = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1] def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int] = None ): __lowerCamelCase = [self.sep_token_id] __lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowerCAmelCase__ ( self: List[str] ): return len(self.encoder ) def lowerCAmelCase__ ( self: Optional[Any] ): return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Dict ): if token in self.cache: return self.cache[token] __lowerCamelCase = tuple(UpperCamelCase_ ) __lowerCamelCase = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) __lowerCamelCase = 
get_pairs(UpperCamelCase_ ) if not pairs: return token while True: __lowerCamelCase = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break __lowerCamelCase = bigram __lowerCamelCase = [] __lowerCamelCase = 0 while i < len(UpperCamelCase_ ): try: __lowerCamelCase = word.index(UpperCamelCase_ , UpperCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __lowerCamelCase = j if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowerCamelCase = tuple(UpperCamelCase_ ) __lowerCamelCase = new_word if len(UpperCamelCase_ ) == 1: break else: __lowerCamelCase = get_pairs(UpperCamelCase_ ) __lowerCamelCase = '''@@ '''.join(UpperCamelCase_ ) __lowerCamelCase = word[:-4] __lowerCamelCase = word return word def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Union[str, Any] ): __lowerCamelCase = [] __lowerCamelCase = re.findall(r"""\S+\n?""" , UpperCamelCase_ ) for token in words: split_tokens.extend(list(self.bpe(UpperCamelCase_ ).split(""" """ ) ) ) return split_tokens def lowerCAmelCase__ ( self: str , UpperCamelCase_: Optional[int] ): return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) ) def lowerCAmelCase__ ( self: str , UpperCamelCase_: Optional[Any] ): return self.decoder.get(UpperCamelCase_ , self.unk_token ) def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Dict ): __lowerCamelCase = ''' '''.join(UpperCamelCase_ ).replace("""@@ """ , """""" ).strip() return out_string def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[int] = None ): if not os.path.isdir(UpperCamelCase_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return __lowerCamelCase = os.path.join( UpperCamelCase_ , (filename_prefix + """-""" if 
filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) __lowerCamelCase = os.path.join( UpperCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ): copyfile(self.vocab_file , UpperCamelCase_ ) if os.path.abspath(self.merges_file ) != os.path.abspath(UpperCamelCase_ ): copyfile(self.merges_file , UpperCamelCase_ ) return out_vocab_file, out_merge_file def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Tuple ): if isinstance(UpperCamelCase_ , UpperCamelCase_ ): try: with open(UpperCamelCase_ , """r""" , encoding="""utf-8""" ) as fd: self.add_from_file(UpperCamelCase_ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(F'Incorrect encoding detected in {f}, please rebuild the dataset' ) return __lowerCamelCase = f.readlines() for lineTmp in lines: __lowerCamelCase = lineTmp.strip() __lowerCamelCase = line.rfind(""" """ ) if idx == -1: raise ValueError("""Incorrect dictionary format, expected \'<token> <cnt>\'""" ) __lowerCamelCase = line[:idx] __lowerCamelCase = len(self.encoder )
12
"""Download a WMT translation dataset and write it as ``{split}.source`` / ``{split}.target`` files."""
from pathlib import Path

import fire
from tqdm import tqdm


def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Save every split of *dataset* (language pair ``src_lang-tgt_lang``) as plain-text files.

    Args:
        src_lang: source language code (e.g. ``"ro"``).
        tgt_lang: target language code (e.g. ``"en"``).
        dataset: Hugging Face dataset name (e.g. ``"wmt16"``).
        save_dir: output directory; defaults to ``"{dataset}-{pair}"``.

    Raises:
        ImportError: if the ``datasets`` package is not installed.
    """
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split

        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")


if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
97
0
from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from 
.scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
188
'''simple docstring''' from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable __snake_case = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''DPTFeatureExtractor'''] __snake_case = ['''DPTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DPTForDepthEstimation''', '''DPTForSemanticSegmentation''', '''DPTModel''', '''DPTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
97
0
import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, BertLayer, BertPooler, BertPreTrainedModel, ) def a_ ( __lowercase : Optional[Any] ) -> Any: _snake_case = torch.exp(__a ) _snake_case = torch.sum(__a , dim=1 ) # sum of exp(x_i) _snake_case = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i) return torch.log(__a ) - B / A class SCREAMING_SNAKE_CASE__ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[Any] , lowercase : Any ): '''simple docstring''' super().__init__() _snake_case = config.output_attentions _snake_case = config.output_hidden_states _snake_case = nn.ModuleList([BertLayer(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] ) _snake_case = nn.ModuleList([BertHighway(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] ) _snake_case = [-1 for _ in range(config.num_hidden_layers )] def A ( self : Dict , lowercase : Dict ): '''simple docstring''' if (type(UpperCamelCase_ ) is float) or (type(UpperCamelCase_ ) is int): for i in range(len(self.early_exit_entropy ) ): _snake_case = x else: _snake_case = x def A ( self : Tuple , lowercase : Optional[int] ): '''simple docstring''' _snake_case = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name] ) def A ( self : Union[str, Any] , lowercase : Union[str, Any] , lowercase : Dict=None , lowercase : Tuple=None , lowercase : Dict=None , lowercase : Union[str, Any]=None , ): '''simple docstring''' _snake_case = () _snake_case = () _snake_case = () for i, layer_module in enumerate(self.layer ): if self.output_hidden_states: _snake_case = all_hidden_states + (hidden_states,) _snake_case = layer_module( UpperCamelCase_ , UpperCamelCase_ , head_mask[i] , 
UpperCamelCase_ , UpperCamelCase_ ) _snake_case = layer_outputs[0] if self.output_attentions: _snake_case = all_attentions + (layer_outputs[1],) _snake_case = (hidden_states,) if self.output_hidden_states: _snake_case = current_outputs + (all_hidden_states,) if self.output_attentions: _snake_case = current_outputs + (all_attentions,) _snake_case = self.highway[i](UpperCamelCase_ ) # logits, pooled_output if not self.training: _snake_case = highway_exit[0] _snake_case = entropy(UpperCamelCase_ ) _snake_case = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy _snake_case = all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: _snake_case = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(UpperCamelCase_ , i + 1 ) else: _snake_case = all_highway_exits + (highway_exit,) # Add last layer if self.output_hidden_states: _snake_case = all_hidden_states + (hidden_states,) _snake_case = (hidden_states,) if self.output_hidden_states: _snake_case = outputs + (all_hidden_states,) if self.output_attentions: _snake_case = outputs + (all_attentions,) _snake_case = outputs + (all_highway_exits,) return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits @add_start_docstrings( "The Bert Model transformer with early exiting (DeeBERT). 
" ,A__ ,) class SCREAMING_SNAKE_CASE__ ( A__ ): '''simple docstring''' def __init__( self : Tuple , lowercase : Any ): '''simple docstring''' super().__init__(UpperCamelCase_ ) _snake_case = config _snake_case = BertEmbeddings(UpperCamelCase_ ) _snake_case = DeeBertEncoder(UpperCamelCase_ ) _snake_case = BertPooler(UpperCamelCase_ ) self.init_weights() def A ( self : List[Any] ): '''simple docstring''' self.encoder.init_highway_pooler(self.pooler ) def A ( self : Optional[int] ): '''simple docstring''' return self.embeddings.word_embeddings def A ( self : Tuple , lowercase : str ): '''simple docstring''' _snake_case = value def A ( self : List[str] , lowercase : int ): '''simple docstring''' for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(UpperCamelCase_ ) @add_start_docstrings_to_model_forward(UpperCamelCase_ ) def A ( self : str , lowercase : Optional[int]=None , lowercase : str=None , lowercase : List[str]=None , lowercase : Any=None , lowercase : List[str]=None , lowercase : List[Any]=None , lowercase : Union[str, Any]=None , lowercase : List[str]=None , ): '''simple docstring''' if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' ) elif input_ids is not None: _snake_case = input_ids.size() elif inputs_embeds is not None: _snake_case = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds' ) _snake_case = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: _snake_case = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ ) if encoder_attention_mask is None: _snake_case = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ ) if token_type_ids is None: _snake_case = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # 
ourselves in which case we just need to make it broadcastable to all heads. _snake_case = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_attention_mask.dim() == 3: _snake_case = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: _snake_case = encoder_attention_mask[:, None, None, :] _snake_case = encoder_extended_attention_mask.to( dtype=next(self.parameters() ).dtype ) # fp16 compatibility _snake_case = (1.0 - encoder_extended_attention_mask) * -10000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] _snake_case = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers ) _snake_case = self.embeddings( input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ ) _snake_case = self.encoder( UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , ) _snake_case = encoder_outputs[0] _snake_case = self.pooler(UpperCamelCase_ ) _snake_case = ( sequence_output, pooled_output, ) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits class SCREAMING_SNAKE_CASE__ ( A__ ): '''simple docstring''' def __init__( self : Any , lowercase : Tuple , lowercase : Any ): '''simple docstring''' _snake_case = message _snake_case = exit_layer # start from 1! 
class SCREAMING_SNAKE_CASE__ ( nn.Module ): '''simple docstring''' def __init__( self : Union[str, Any] , lowercase : str ): '''simple docstring''' super().__init__() _snake_case = BertPooler(UpperCamelCase_ ) _snake_case = nn.Dropout(config.hidden_dropout_prob ) _snake_case = nn.Linear(config.hidden_size , config.num_labels ) def A ( self : int , lowercase : Optional[int] ): '''simple docstring''' _snake_case = encoder_outputs[0] _snake_case = self.pooler(UpperCamelCase_ ) # "return" pooler_output # BertModel _snake_case = (pooler_input, pooler_output) + encoder_outputs[1:] # "return" bmodel_output # Dropout and classification _snake_case = bmodel_output[1] _snake_case = self.dropout(UpperCamelCase_ ) _snake_case = self.classifier(UpperCamelCase_ ) return logits, pooled_output @add_start_docstrings( "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " ,A__ ,) class SCREAMING_SNAKE_CASE__ ( A__ ): '''simple docstring''' def __init__( self : Any , lowercase : Optional[int] ): '''simple docstring''' super().__init__(UpperCamelCase_ ) _snake_case = config.num_labels _snake_case = config.num_hidden_layers _snake_case = DeeBertModel(UpperCamelCase_ ) _snake_case = nn.Dropout(config.hidden_dropout_prob ) _snake_case = nn.Linear(config.hidden_size , self.config.num_labels ) self.init_weights() @add_start_docstrings_to_model_forward(UpperCamelCase_ ) def A ( self : Optional[int] , lowercase : List[Any]=None , lowercase : str=None , lowercase : Dict=None , lowercase : Dict=None , lowercase : Any=None , lowercase : Tuple=None , lowercase : Optional[int]=None , lowercase : List[Any]=-1 , lowercase : Optional[int]=False , ): '''simple docstring''' _snake_case = self.num_layers try: _snake_case = self.bert( UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , ) # sequence_output, pooled_output, 
(hidden_states), (attentions), highway exits _snake_case = outputs[1] _snake_case = self.dropout(UpperCamelCase_ ) _snake_case = self.classifier(UpperCamelCase_ ) _snake_case = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _snake_case = e.message _snake_case = e.exit_layer _snake_case = outputs[0] if not self.training: _snake_case = entropy(UpperCamelCase_ ) _snake_case = [] _snake_case = [] if labels is not None: if self.num_labels == 1: # We are doing regression _snake_case = MSELoss() _snake_case = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: _snake_case = CrossEntropyLoss() _snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits _snake_case = [] for highway_exit in outputs[-1]: _snake_case = highway_exit[0] if not self.training: highway_logits_all.append(UpperCamelCase_ ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression _snake_case = MSELoss() _snake_case = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: _snake_case = CrossEntropyLoss() _snake_case = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(UpperCamelCase_ ) if train_highway: _snake_case = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: _snake_case = (loss,) + outputs if not self.training: _snake_case = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _snake_case = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
282
"""Hamming distance between two equal-length strings."""


def a(string_a: str, string_b: str) -> int:
    """Return the number of positions at which the two strings differ.

    Args:
        string_a: first string.
        string_b: second string, same length as *string_a*.

    Raises:
        ValueError: if the strings differ in length.
    """
    if len(string_a) != len(string_b):
        raise ValueError("String lengths must match!")
    # count positions where the paired characters differ
    return sum(char_a != char_b for char_a, char_b in zip(string_a, string_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
97
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase : List[Any] ={ '''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''], '''tokenization_luke''': ['''LukeTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : List[Any] =[ '''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LukeForEntityClassification''', '''LukeForEntityPairClassification''', '''LukeForEntitySpanClassification''', '''LukeForMultipleChoice''', '''LukeForQuestionAnswering''', '''LukeForSequenceClassification''', '''LukeForTokenClassification''', '''LukeForMaskedLM''', '''LukeModel''', '''LukePreTrainedModel''', ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys lowerCamelCase : int =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
189
"""Catalan number sequence via dynamic programming."""


def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0)..C(upper_limit).

    Args:
        upper_limit: largest index to compute (must be >= 0).

    Raises:
        ValueError: if *upper_limit* is negative.
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
97
0
"""ResNet model configuration (and its ONNX export config)."""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a ResNet model; defaults mirror microsoft/resnet-50.

    NOTE(review): the obfuscated original assigned every value to a throwaway
    local instead of ``self.*``, so the config stored nothing — restored here.
    """

    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1_024, 2_048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''')
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        # "stem" plus one named stage per depth entry; these names drive the
        # aligned output-feature / output-index bookkeeping below.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    """ONNX export settings for ResNet."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # single pixel_values input with dynamic batch/channel/spatial axes
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
139
'''simple docstring''' import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def a ( __a , __a ) -> Optional[int]: '''simple docstring''' assert isinstance(__a , __a ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def a ( __a , __a , __a ) -> Optional[Any]: '''simple docstring''' UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache''' UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCamelCase__ :Tuple = JsonDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read() _check_json_dataset(__a , __a ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def a ( __a , __a , __a ) -> Any: '''simple docstring''' UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache''' UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCamelCase__ :Optional[Any] = features.copy() if features else default_expected_features UpperCamelCase__ :Tuple = ( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) 
UpperCamelCase__ :int = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read() _check_json_dataset(__a , __a ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}, ] , ) def a ( __a , __a , __a ) -> Tuple: '''simple docstring''' UpperCamelCase__ :int = tmp_path / '''cache''' UpperCamelCase__ :str = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''} UpperCamelCase__ :Any = features.copy() if features else default_expected_features UpperCamelCase__ :Union[str, Any] = ( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCamelCase__ :Any = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read() assert isinstance(__a , __a ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def a ( __a , __a ) -> List[Any]: '''simple docstring''' UpperCamelCase__ :Any = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''} UpperCamelCase__ :int = features.copy() UpperCamelCase__ :List[Any] = ( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCamelCase__ :Optional[int] = tmp_path / '''cache''' UpperCamelCase__ :Dict = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read() assert isinstance(__a , __a ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def a ( __a , __a , __a ) -> List[Any]: '''simple docstring''' UpperCamelCase__ :Union[str, Any] 
= tmp_path / '''cache''' UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCamelCase__ :List[Any] = JsonDatasetReader(__a , cache_dir=__a , split=__a ).read() _check_json_dataset(__a , __a ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def a ( __a , __a , __a ) -> Any: '''simple docstring''' if issubclass(__a , __a ): UpperCamelCase__ :Union[str, Any] = jsonl_path elif issubclass(__a , __a ): UpperCamelCase__ :int = [jsonl_path] UpperCamelCase__ :Dict = tmp_path / '''cache''' UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCamelCase__ :List[str] = JsonDatasetReader(__a , cache_dir=__a ).read() _check_json_dataset(__a , __a ) def a ( __a , __a , __a=("train",) ) -> Optional[Any]: '''simple docstring''' assert isinstance(__a , __a ) for split in splits: UpperCamelCase__ :Optional[int] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def a ( __a , __a , __a ) -> List[str]: '''simple docstring''' UpperCamelCase__ :List[str] = tmp_path / '''cache''' UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCamelCase__ :str = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=__a , keep_in_memory=__a ).read() _check_json_datasetdict(__a , __a ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': 
'''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def a ( __a , __a , __a ) -> int: '''simple docstring''' UpperCamelCase__ :Tuple = tmp_path / '''cache''' UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCamelCase__ :Optional[int] = features.copy() if features else default_expected_features UpperCamelCase__ :str = ( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCamelCase__ :Dict = JsonDatasetReader({'''train''': jsonl_path} , features=__a , cache_dir=__a ).read() _check_json_datasetdict(__a , __a ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def a ( __a , __a , __a ) -> str: '''simple docstring''' if split: UpperCamelCase__ :List[str] = {split: jsonl_path} else: UpperCamelCase__ :int = '''train''' UpperCamelCase__ :int = {'''train''': jsonl_path, '''test''': jsonl_path} UpperCamelCase__ :Any = tmp_path / '''cache''' UpperCamelCase__ :Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCamelCase__ :Any = JsonDatasetReader(__a , cache_dir=__a ).read() _check_json_datasetdict(__a , __a , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def a ( __a ) -> Union[str, Any]: '''simple docstring''' return json.load(__a ) def a ( __a ) -> int: '''simple docstring''' return [json.loads(__a ) for line in buffer] class lowercase : """simple docstring""" @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] ) def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): '''simple docstring''' with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , 
lines=UpperCamelCase_ ).write() buffer.seek(0 ) UpperCamelCase__ :List[Any] = load_json_function(UpperCamelCase_ ) assert isinstance(UpperCamelCase_ , UpperCamelCase_ ) assert isinstance(exported_content[0] , UpperCamelCase_ ) assert len(UpperCamelCase_ ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''' , [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ] , ) def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): '''simple docstring''' with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ ).write() buffer.seek(0 ) UpperCamelCase__ :Optional[int] = load_json(UpperCamelCase_ ) assert isinstance(UpperCamelCase_ , UpperCamelCase_ ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase_ ) == 10 @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] ) def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): '''simple docstring''' with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , num_proc=2 ).write() buffer.seek(0 ) UpperCamelCase__ :Union[str, Any] = load_json_function(UpperCamelCase_ ) assert isinstance(UpperCamelCase_ , UpperCamelCase_ ) assert isinstance(exported_content[0] , UpperCamelCase_ 
) assert len(UpperCamelCase_ ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''' , [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ] , ) def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): '''simple docstring''' with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ , num_proc=2 ).write() buffer.seek(0 ) UpperCamelCase__ :int = load_json(UpperCamelCase_ ) assert isinstance(UpperCamelCase_ , UpperCamelCase_ ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase_ ) == 10 def lowerCAmelCase__ ( self , UpperCamelCase_ ): '''simple docstring''' with pytest.raises(UpperCamelCase_ ): with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , num_proc=0 ) @pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] ) def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): '''simple docstring''' UpperCamelCase__ :Tuple = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}''' UpperCamelCase__ :Union[str, Any] = str(shared_datadir / F'''test_file.json.{extension}''' ) JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , compression=UpperCamelCase_ ).write() with 
fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f: UpperCamelCase__ :Dict = f.read() with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f: UpperCamelCase__ :int = f.read() assert exported_content == original_content
97
0
import warnings

from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor


logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    """Deprecated alias of :class:`PoolFormerImageProcessor`.

    Kept only for backward compatibility; emits a deprecation warning on
    construction and otherwise behaves exactly like PoolFormerImageProcessor.
    """

    def __init__(self, *args, **kwargs):
        # FutureWarning is the conventional category for scheduled API removals;
        # the original passed an undefined name here, which raised NameError.
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
103
"""Tests for accelerate's SageMaker launch-argument conversion."""
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig):
    # Field names restored from values/context: the test class below reads
    # `success_training_script_args` / `fail_training_script_args` by name,
    # so the original all-`_a` duplicate fields could never have worked.
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True  # NOTE(review): boolean flag name inferred from value — confirm against SageMakerConfig
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class lowercase(unittest.TestCase):
    def test_args_convert(self):
        """Check that string CLI values are converted to their natural Python types."""
        # The values above make the expected types unambiguous:
        # 'bert' -> str, 'False' -> bool, '3' -> int, '5e-5' and '50.5' -> float.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        # A bare flag followed by another flag's value is malformed input and
        # must raise rather than be converted silently.
        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
97
0
"""Root-mean-square speed of gas molecules: v_rms = sqrt(3RT/M)."""

# Universal gas constant R in J/(mol*K). The function below reads this name,
# so the constant must be bound to it (the original bound `_snake_case`).
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the RMS speed (m/s) of molecules of a gas.

    Args:
        temperature: absolute temperature in kelvin; must be >= 0.
        molar_mass: molar mass; the error message says kg/mol.
            NOTE(review): the example below passes 28 (g/mol for N2), which is
            inconsistent with kg/mol — confirm intended units with the author.

    Raises:
        ValueError: if temperature < 0 or molar_mass <= 0.
    """
    if temperature < 0:
        raise ValueError("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise ValueError("Molar mass cannot be less than or equal to 0 kg/mol")
    # v_rms = sqrt(3RT/M)
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # example: the __main__ block referenced these names, so the mangled
    # `_snake_case` assignments must restore them.
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
250
"""Small torch/matplotlib helpers: freeze params, pick a device, show an image, timestamp."""
from datetime import datetime

import matplotlib.pyplot as plt
import torch

# NOTE(review): all four helpers below were named `a` in the mangled source, so
# each definition shadowed the previous one and only the last was reachable.
# Names restored from what each body does — confirm against the original callers.


def freeze_params(module) -> None:
    """Disable gradients for every parameter of *module*."""
    for param in module.parameters():
        # The original assigned False to a throwaway local; the only sensible
        # target of this loop is the parameter's requires_grad flag.
        param.requires_grad = False


def get_device() -> str:
    """Return 'cuda' if available, else 'mps' (with a warning), else 'cpu'."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image) -> None:
    """Display *image* with matplotlib, hiding both axes."""
    fig = plt.imshow(image)
    # Hide the axis decorations; the original passed the image itself (truthy)
    # where a boolean belongs, which would have left the axes visible.
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    """Return the current wall-clock time formatted as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
97
0
"""simple docstring""" import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]: '''simple docstring''' assert isinstance(__a , __a ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' lowercase : Union[str, Any] = tmp_path / '''cache''' lowercase : Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowercase : Tuple = JsonDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read() _check_json_dataset(__a , __a ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any: '''simple docstring''' lowercase : Union[str, Any] = tmp_path / '''cache''' lowercase : Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowercase : Optional[Any] = features.copy() if features else default_expected_features lowercase : Tuple = ( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) lowercase : int = 
JsonDatasetReader(__a , features=__a , cache_dir=__a ).read() _check_json_dataset(__a , __a ) @pytest.mark.parametrize( 'features' , [ None, {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}, ] , ) def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple: '''simple docstring''' lowercase : int = tmp_path / '''cache''' lowercase : str = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''} lowercase : Any = features.copy() if features else default_expected_features lowercase : Union[str, Any] = ( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) lowercase : Any = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read() assert isinstance(__a , __a ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: '''simple docstring''' lowercase : Any = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''} lowercase : int = features.copy() lowercase : List[Any] = ( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) lowercase : Optional[int] = tmp_path / '''cache''' lowercase : Dict = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read() assert isinstance(__a , __a ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: '''simple docstring''' lowercase : Union[str, Any] = 
tmp_path / '''cache''' lowercase : Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowercase : List[Any] = JsonDatasetReader(__a , cache_dir=__a , split=__a ).read() _check_json_dataset(__a , __a ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('path_type' , [str, list] ) def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any: '''simple docstring''' if issubclass(__a , __a ): lowercase : Union[str, Any] = jsonl_path elif issubclass(__a , __a ): lowercase : int = [jsonl_path] lowercase : Dict = tmp_path / '''cache''' lowercase : Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowercase : List[str] = JsonDatasetReader(__a , cache_dir=__a ).read() _check_json_dataset(__a , __a ) def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=("train",) ) -> Optional[Any]: '''simple docstring''' assert isinstance(__a , __a ) for split in splits: lowercase : Optional[int] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: '''simple docstring''' lowercase : List[str] = tmp_path / '''cache''' lowercase : Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowercase : str = JsonDatasetReader({'train': jsonl_path} , cache_dir=__a , keep_in_memory=__a ).read() _check_json_datasetdict(__a , __a ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 
'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int: '''simple docstring''' lowercase : Tuple = tmp_path / '''cache''' lowercase : Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowercase : Optional[int] = features.copy() if features else default_expected_features lowercase : str = ( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) lowercase : Dict = JsonDatasetReader({'train': jsonl_path} , features=__a , cache_dir=__a ).read() _check_json_datasetdict(__a , __a ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> str: '''simple docstring''' if split: lowercase : List[str] = {split: jsonl_path} else: lowercase : int = '''train''' lowercase : int = {'''train''': jsonl_path, '''test''': jsonl_path} lowercase : Any = tmp_path / '''cache''' lowercase : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowercase : Any = JsonDatasetReader(__a , cache_dir=__a ).read() _check_json_datasetdict(__a , __a , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowercase__ ( _UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' return json.load(__a ) def lowercase__ ( _UpperCAmelCase ) -> int: '''simple docstring''' return [json.loads(__a ) for line in buffer] class a__ : @pytest.mark.parametrize('lines, load_json_function', [(True, load_json_lines), (False, load_json)] ) def lowercase ( self : Optional[Any], lowerCAmelCase : Optional[int], lowerCAmelCase : Tuple, lowerCAmelCase : Optional[Any] ) -> List[str]: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase_, UpperCamelCase_, lines=UpperCamelCase_ 
).write() buffer.seek(0 ) lowercase : List[Any] = load_json_function(UpperCamelCase_ ) assert isinstance(UpperCamelCase_, UpperCamelCase_ ) assert isinstance(exported_content[0], UpperCamelCase_ ) assert len(UpperCamelCase_ ) == 10 @pytest.mark.parametrize( 'orient, container, keys, len_at', [ ('records', list, {'tokens', 'labels', 'answers', 'id'}, None), ('split', dict, {'columns', 'data'}, 'data'), ('index', dict, set('0123456789' ), None), ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'), ('values', list, None, None), ('table', dict, {'schema', 'data'}, 'data'), ], ) def lowercase ( self : List[Any], lowerCAmelCase : List[str], lowerCAmelCase : Dict, lowerCAmelCase : List[Any], lowerCAmelCase : List[Any], lowerCAmelCase : Optional[Any] ) -> Optional[int]: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase_, UpperCamelCase_, lines=UpperCamelCase_, orient=UpperCamelCase_ ).write() buffer.seek(0 ) lowercase : Optional[int] = load_json(UpperCamelCase_ ) assert isinstance(UpperCamelCase_, UpperCamelCase_ ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase_, 'keys' ) and not hasattr(exported_content[0], 'keys' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase_ ) == 10 @pytest.mark.parametrize('lines, load_json_function', [(True, load_json_lines), (False, load_json)] ) def lowercase ( self : Any, lowerCAmelCase : Union[str, Any], lowerCAmelCase : Union[str, Any], lowerCAmelCase : str ) -> Union[str, Any]: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase_, UpperCamelCase_, lines=UpperCamelCase_, num_proc=2 ).write() buffer.seek(0 ) lowercase : Union[str, Any] = load_json_function(UpperCamelCase_ ) assert isinstance(UpperCamelCase_, UpperCamelCase_ ) assert isinstance(exported_content[0], UpperCamelCase_ ) assert len(UpperCamelCase_ ) == 10 @pytest.mark.parametrize( 'orient, 
container, keys, len_at', [ ('records', list, {'tokens', 'labels', 'answers', 'id'}, None), ('split', dict, {'columns', 'data'}, 'data'), ('index', dict, set('0123456789' ), None), ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'), ('values', list, None, None), ('table', dict, {'schema', 'data'}, 'data'), ], ) def lowercase ( self : Tuple, lowerCAmelCase : List[str], lowerCAmelCase : Dict, lowerCAmelCase : int, lowerCAmelCase : Tuple, lowerCAmelCase : List[Any] ) -> Dict: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase_, UpperCamelCase_, lines=UpperCamelCase_, orient=UpperCamelCase_, num_proc=2 ).write() buffer.seek(0 ) lowercase : int = load_json(UpperCamelCase_ ) assert isinstance(UpperCamelCase_, UpperCamelCase_ ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase_, 'keys' ) and not hasattr(exported_content[0], 'keys' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase_ ) == 10 def lowercase ( self : List[Any], lowerCAmelCase : Optional[Any] ) -> Union[str, Any]: with pytest.raises(UpperCamelCase_ ): with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase_, UpperCamelCase_, num_proc=0 ) @pytest.mark.parametrize('compression, extension', [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] ) def lowercase ( self : int, lowerCAmelCase : Union[str, Any], lowerCAmelCase : Union[str, Any], lowerCAmelCase : Optional[int], lowerCAmelCase : Optional[int], lowerCAmelCase : Optional[int] ) -> Tuple: lowercase : Tuple = tmp_path_factory.mktemp('data' ) / f'''test.json.{extension}''' lowercase : Union[str, Any] = str(shared_datadir / f'''test_file.json.{extension}''' ) JsonDatasetWriter(UpperCamelCase_, UpperCamelCase_, compression=UpperCamelCase_ ).write() with fsspec.open(UpperCamelCase_, 'rb', compression='infer' ) as f: lowercase : Dict = f.read() with fsspec.open(UpperCamelCase_, 'rb', 
compression='infer' ) as f: lowercase : int = f.read() assert exported_content == original_content
255
"""Pearson correlation metric for 🤗 Datasets, backed by scipy.stats.pearsonr."""
from scipy.stats import pearsonr

import datasets


# Names restored: the decorator and _info below read _DESCRIPTION,
# _KWARGS_DESCRIPTION and _CITATION, so the three mangled `__snake_case`
# assignments (which overwrote each other) must bind these names.
_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""

_CITATION = """
@article{2020SciPy-NMeth,
author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
          Haberland, Matt and Reddy, Tyler and Cournapeau, David and
          Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
          Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
          Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
          Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
          Kern, Robert and Larson, Eric and Carey, C J and
          Polat, Ilhan and Feng, Yu and Moore, Eric W. and
          {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
          Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
          Harris, Charles R. and Archibald, Anne M. and
          Ribeiro, Antonio H. and Pedregosa, Fabian and
          {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
          Computing in Python}},
journal = {Nature Methods},
year    = {2020},
volume  = {17},
pages   = {261--272},
adsurl  = {https://rdcu.be/b08Wh},
doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowercase(datasets.Metric):
    """Wraps scipy.stats.pearsonr as a datasets Metric."""

    # datasets.Metric dispatches to `_info` / `_compute`; the mangled
    # `lowerCAmelCase__` method names would never be called by the library.
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        """Return {'pearsonr': r} or, with return_pvalue, {'pearsonr': r, 'p-value': p}."""
        # Pearson correlation is symmetric in its two arguments, so argument
        # order does not affect the result.
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        return {"pearsonr": float(pearsonr(references, predictions)[0])}
97
0
"""Close or nag stale GitHub issues on huggingface/transformers (bot script)."""
from datetime import datetime as dt
import os

from github import Github

# Issues carrying any of these labels are never auto-closed or auto-commented on.
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    """Scan open issues and close / stale-comment the inactive ones.

    Pure side effects against the GitHub API; returns nothing.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first. The original lambda read an undefined name `i`
        # instead of its own parameter, and `reverse` was an undefined name;
        # taking comments[0] as the *last* comment requires reverse=True.
        comments = sorted(issue.get_comments(), key=lambda comment: comment.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Bot already nagged and a further week passed with no activity: close.
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # First warning: mark as stale.
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    # The original defined `lowercase` but called `main()` here (NameError);
    # the function is the entry point, so it is named main.
    main()
102
"""Fast tokenizer for BlenderbotSmall, backed by a byte-level BPE model."""
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


logger = logging.get_logger(__name__)

# Constant names restored: the class body below reads these exact names; the
# mangled source bound four different dicts to the single name `__snake_case`.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Fast BlenderbotSmall tokenizer.

    NOTE(review): the mangled original declared every parameter as
    `UpperCamelCase_` (duplicate argument names = SyntaxError) and every class
    attribute as `_a`; names below follow the PreTrainedTokenizerFast API.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap one or two sequences with BOS/EOS: <bos> A <eos> [<eos> B <eos>]."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return an all-zero token-type mask sized for one or two sequences.

        BlenderbotSmall does not use token types, so every position is 0.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
97
0
"""Slow TF integration test checking the mT5-small loss on a fixed input pair."""
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase__ ( unittest.TestCase ):
    """Integration test: google/mt5-small loss must match a recorded reference."""

    @slow
    def UpperCAmelCase__ ( self : List[str] ):
        """Load mt5-small, score a fixed (input, label) pair, compare the loss.

        NOTE(review): every intermediate result below is bound to the same
        throwaway name, while later lines read ``model``, ``tokenizer``,
        ``UpperCamelCase_``, ``mtf_score`` and ``EXPECTED_SCORE`` — none of
        which are defined. This is an obfuscation artifact; verify against the
        upstream test before trusting it to run.
        """
        lowerCamelCase_ : Union[str, Any] =TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" )
        lowerCamelCase_ : Union[str, Any] =AutoTokenizer.from_pretrained("google/mt5-small" )
        lowerCamelCase_ : List[Any] =tokenizer("Hello there" , return_tensors="tf" ).input_ids
        lowerCamelCase_ : Union[str, Any] =tokenizer("Hi I am" , return_tensors="tf" ).input_ids
        # Mean negative loss over tokens, compared against the recorded value.
        lowerCamelCase_ : Union[str, Any] =model(UpperCamelCase_ , labels=UpperCamelCase_ ).loss
        lowerCamelCase_ : Union[str, Any] =-tf.math.reduce_mean(UpperCamelCase_ ).numpy()
        lowerCamelCase_ : Tuple =-21.228_168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
144
"""Conditional exports for the Stable Diffusion ControlNet pipelines.

Falls back to dummy objects when torch/transformers are missing, and only
exposes the Flax pipeline when both transformers and flax are installed.
"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


try:
    # Raise into the except branch when the torch-based pipelines can't load.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Placeholder objects that raise a helpful error on use.
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
97
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) A__: Tuple = { '''configuration_mobilebert''': [ '''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileBertConfig''', '''MobileBertOnnxConfig''', ], '''tokenization_mobilebert''': ['''MobileBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__: int = ['''MobileBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__: int = [ '''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MobileBertForMaskedLM''', '''MobileBertForMultipleChoice''', '''MobileBertForNextSentencePrediction''', '''MobileBertForPreTraining''', '''MobileBertForQuestionAnswering''', '''MobileBertForSequenceClassification''', '''MobileBertForTokenClassification''', '''MobileBertLayer''', '''MobileBertModel''', '''MobileBertPreTrainedModel''', '''load_tf_weights_in_mobilebert''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__: Optional[int] = [ '''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFMobileBertForMaskedLM''', '''TFMobileBertForMultipleChoice''', '''TFMobileBertForNextSentencePrediction''', '''TFMobileBertForPreTraining''', '''TFMobileBertForQuestionAnswering''', '''TFMobileBertForSequenceClassification''', '''TFMobileBertForTokenClassification''', '''TFMobileBertMainLayer''', '''TFMobileBertModel''', '''TFMobileBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mobilebert import ( MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig, MobileBertOnnxConfig, ) from .tokenization_mobilebert import MobileBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except 
OptionalDependencyNotAvailable: pass else: from .tokenization_mobilebert_fast import MobileBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilebert import ( MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertLayer, MobileBertModel, MobileBertPreTrainedModel, load_tf_weights_in_mobilebert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilebert import ( TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertMainLayer, TFMobileBertModel, TFMobileBertPreTrainedModel, ) else: import sys A__: Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
276
"""Dataset input stream that builds a dataset from a Python generator."""
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class lowercase ( A__ ):
    """Input stream wrapping the packaged ``generator`` dataset builder.

    NOTE(review): the base class is spelled ``A__`` although
    ``AbstractDatasetInputStream`` is imported above and unused — likely an
    obfuscation artifact; confirm against the upstream file.
    """

    # NOTE(review): every keyword parameter below carries the same name, which
    # is a SyntaxError in Python; upstream this is
    # (generator, features, cache_dir, keep_in_memory, streaming, gen_kwargs,
    # num_proc, **kwargs). Left byte-identical for review.
    def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
        """Initialise the stream and its underlying Generator builder."""
        super().__init__(
            features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , )
        # NOTE(review): assigns a throwaway local, not ``self.builder`` — the
        # read() method below depends on ``self.builder``; verify upstream.
        UpperCamelCase__ :Any = Generator(
            cache_dir=UpperCamelCase_ , features=UpperCamelCase_ , generator=UpperCamelCase_ , gen_kwargs=UpperCamelCase_ , **UpperCamelCase_ , )

    def lowerCAmelCase__ ( self ):
        """Materialise the dataset: streaming view, or download-and-prepare.

        NOTE(review): the locals here are all bound to the same throwaway name,
        and the final ``return dataset`` reads an undefined name — obfuscation
        artifact, verify against the upstream file.
        """
        if self.streaming:
            UpperCamelCase__ :Optional[Any] = self.builder.as_streaming_dataset(split='''train''' )
        # Build regular (map-style) dataset
        else:
            UpperCamelCase__ :Optional[int] = None
            UpperCamelCase__ :int = None
            UpperCamelCase__ :Any = None
            UpperCamelCase__ :Any = None
            self.builder.download_and_prepare(
                download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , )
            UpperCamelCase__ :List[Any] = self.builder.as_dataset(
                split='''train''' , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory )
        return dataset
97
0
"""In-place quicksort with a uniformly random pivot."""
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    """Partition ``a[left_index:right_index]`` around ``a[left_index]`` (the pivot).

    Elements smaller than the pivot are moved before it. Returns the final
    index of the pivot. The original code assigned the swap tuples to a local
    (``x = a[i], a[j]``) instead of swapping — no element ever moved; fixed
    here with real tuple swaps.
    """
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    # Place the pivot between the smaller and larger partitions.
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    """Sort ``a[left:right]`` in place using quicksort with a random pivot.

    The original file defined this under an obfuscated name while calling
    ``quick_sort_random`` recursively (NameError); the call-site name is kept.
    """
    if left < right:
        pivot = random.randint(left, right - 1)
        # Move the randomly chosen pivot to the left bound before partitioning.
        a[pivot], a[left] = a[left], a[pivot]
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # sort left of the pivot
        quick_sort_random(a, pivot_index + 1, right)  # sort right of the pivot


def main() -> None:
    """Read comma-separated integers from stdin, sort them, print the result."""
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    quick_sort_random(unsorted, 0, len(unsorted))
    print(unsorted)


if __name__ == "__main__":
    main()
12
"""Adler-32 rolling checksum (RFC 1950)."""

# Largest prime smaller than 2**16; the Adler-32 modulus. The original bound
# this to an obfuscated name while the function read ``MOD_ADLER`` (NameError).
MOD_ADLER = 65521


def a(plain_text: str) -> int:
    """Return the Adler-32 checksum of ``plain_text`` as an int.

    The high 16 bits hold the running sum-of-sums ``b``, the low 16 bits the
    running byte sum ``a`` (which starts at 1, so ``a("") == 1``). The
    original parameter was named ``__a`` while the loop iterated the undefined
    name ``plain_text``; the body's name is kept and the signature fixed.
    """
    low = 1
    high = 0
    for plain_chr in plain_text:
        low = (low + ord(plain_chr)) % MOD_ADLER
        high = (high + low) % MOD_ADLER
    return (high << 16) | low
97
0
import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand lowerCamelCase = ( '''4S 3H 2C 7S 5H''', '''9D 8H 2C 6S 7H''', '''2D 6D 9D TH 7D''', '''TC 8C 2S JH 6C''', '''JH 8S TH AH QH''', '''TS KS 5S 9S AC''', '''KD 6S 9D TH AD''', '''KS 8D 4D 9S 4S''', # pair '''8C 4S KH JS 4D''', # pair '''QH 8H KD JH 8S''', # pair '''KC 4H KS 2H 8D''', # pair '''KD 4S KC 3H 8S''', # pair '''AH 8S AS KC JH''', # pair '''3H 4C 4H 3S 2H''', # 2 pairs '''5S 5D 2C KH KH''', # 2 pairs '''3C KH 5D 5S KH''', # 2 pairs '''AS 3C KH AD KH''', # 2 pairs '''7C 7S 3S 7H 5S''', # 3 of a kind '''7C 7S KH 2H 7H''', # 3 of a kind '''AC KH QH AH AS''', # 3 of a kind '''2H 4D 3C AS 5S''', # straight (low ace) '''3C 5C 4C 2C 6H''', # straight '''6S 8S 7S 5H 9H''', # straight '''JS QS 9H TS KH''', # straight '''QC KH TS JS AH''', # straight (high ace) '''8C 9C 5C 3C TC''', # flush '''3S 8S 9S 5S KS''', # flush '''4C 5C 9C 8C KC''', # flush '''JH 8H AH KH QH''', # flush '''3D 2H 3H 2C 2D''', # full house '''2H 2C 3S 3H 3D''', # full house '''KH KC 3S 3H 3D''', # full house '''JC 6H JS JD JH''', # 4 of a kind '''JC 7H JS JD JH''', # 4 of a kind '''JC KH JS JD JH''', # 4 of a kind '''2S AS 4S 5S 3S''', # straight flush (low ace) '''2D 6D 3D 4D 5D''', # straight flush '''5C 6C 3C 7C 4C''', # straight flush '''JH 9H TH KH QH''', # straight flush '''JH AH TH KH QH''', # royal flush (high ace straight flush) ) lowerCamelCase = ( ('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''), ('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''), ('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''), ('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''), ('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''), ('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''), ('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''), ('''2S 2H 4H 5S 
4C''', '''AH AC 5H 6H AS''', '''Loss'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''), ('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''), ('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''), ('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''), ('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''), ('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''), ('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''), ('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''), ('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''), ('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''), ('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''), ('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''), ('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''), ('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''), ('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''), ('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''), ('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''), ('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''), ('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''), ('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''), ('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''), ) lowerCamelCase = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', True), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', False), ('''AS 3S 4S 8S 2S''', True), ) lowerCamelCase = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', False), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', True), ) lowerCamelCase = ( ('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]), ('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]), ('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]), ('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]), ) lowerCamelCase = ( ('''JH AH TH KH 
QH''', 0), ('''JH 9H TH KH QH''', 0), ('''JC KH JS JD JH''', 7), ('''KH KC 3S 3H 3D''', 6), ('''8C 9C 5C 3C TC''', 0), ('''JS QS 9H TS KH''', 0), ('''7C 7S KH 2H 7H''', 3), ('''3C KH 5D 5S KH''', 2), ('''QH 8H KD JH 8S''', 1), ('''2D 6D 9D TH 7D''', 0), ) lowerCamelCase = ( ('''JH AH TH KH QH''', 23), ('''JH 9H TH KH QH''', 22), ('''JC KH JS JD JH''', 21), ('''KH KC 3S 3H 3D''', 20), ('''8C 9C 5C 3C TC''', 19), ('''JS QS 9H TS KH''', 18), ('''7C 7S KH 2H 7H''', 17), ('''3C KH 5D 5S KH''', 16), ('''QH 8H KD JH 8S''', 15), ('''2D 6D 9D TH 7D''', 14), ) def UpperCAmelCase__ ( ): '''simple docstring''' a__ =randrange(len(__a ) ), randrange(len(__a ) ) a__ =['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)] a__ =SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def UpperCAmelCase__ ( _A : Any = 1_00 ): '''simple docstring''' return (generate_random_hand() for _ in range(__a )) @pytest.mark.parametrize('''hand, expected''' , __a ) def UpperCAmelCase__ ( _A : Optional[int] , _A : Dict ): '''simple docstring''' assert PokerHand(__a )._is_flush() == expected @pytest.mark.parametrize('''hand, expected''' , __a ) def UpperCAmelCase__ ( _A : List[str] , _A : List[str] ): '''simple docstring''' assert PokerHand(__a )._is_straight() == expected @pytest.mark.parametrize('''hand, expected, card_values''' , __a ) def UpperCAmelCase__ ( _A : Dict , _A : Tuple , _A : int ): '''simple docstring''' a__ =PokerHand(__a ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('''hand, expected''' , __a ) def UpperCAmelCase__ ( _A : Optional[Any] , _A : Any ): '''simple docstring''' assert PokerHand(__a )._is_same_kind() == expected @pytest.mark.parametrize('''hand, expected''' , __a ) def UpperCAmelCase__ ( _A : str , _A : Any ): '''simple docstring''' assert PokerHand(__a )._hand_type == expected @pytest.mark.parametrize('''hand, other, expected''' , __a ) def UpperCAmelCase__ ( _A : Tuple , 
_A : Optional[Any] , _A : Union[str, Any] ): '''simple docstring''' assert PokerHand(__a ).compare_with(PokerHand(__a ) ) == expected @pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() ) def UpperCAmelCase__ ( _A : Union[str, Any] , _A : List[str] , _A : Union[str, Any] ): '''simple docstring''' assert PokerHand(__a ).compare_with(PokerHand(__a ) ) == expected def UpperCAmelCase__ ( ): '''simple docstring''' a__ =[PokerHand(__a ) for hand in SORTED_HANDS] a__ =poker_hands.copy() shuffle(__a ) a__ =chain(sorted(__a ) ) for index, hand in enumerate(__a ): assert hand == poker_hands[index] def UpperCAmelCase__ ( ): '''simple docstring''' a__ =[PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )] pokerhands.sort(reverse=__a ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def UpperCAmelCase__ ( ): '''simple docstring''' a__ =PokerHand('''2C 4S AS 3D 5C''' ) a__ =True a__ =[5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def UpperCAmelCase__ ( ): '''simple docstring''' a__ =0 a__ =os.path.abspath(os.path.dirname(__a ) ) a__ =os.path.join(__a , '''poker_hands.txt''' ) with open(__a ) as file_hand: for line in file_hand: a__ =line[:14].strip() a__ =line[15:].strip() a__ =PokerHand(__a ), PokerHand(__a ) a__ =player.compare_with(__a ) if output == "Win": answer += 1 assert answer == 3_76
188
"""CamemBERT model configuration and its ONNX export configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Pretrained config download locations. The original bound both the logger and
# this map to the same obfuscated name, clobbering the logger.
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
    '''umberto-commoncrawl-cased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
    ),
    '''umberto-wikipedia-uncased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
    ),
}


class lowercase ( PretrainedConfig ):
    """Configuration for a CamemBERT model (BERT-style hyperparameters).

    The original inherited from the undefined name ``A__`` even though
    ``PretrainedConfig`` was imported and unused — fixed here.
    """

    _a = 'camembert'  # model-type identifier used by the config registry

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """Store the hyperparameters on the instance.

        The original signature repeated one obfuscated parameter name for every
        argument (a SyntaxError) and assigned each value to a throwaway local
        instead of ``self`` — the parameter names restored here are taken from
        the names the original body read on the right-hand side.
        """
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


# NOTE(review): this class reuses the name ``lowercase`` and therefore shadows
# the config class above at module level — an obfuscation artifact; upstream
# these are two distinctly named classes. Kept as-is to avoid inventing names.
class lowercase ( OnnxConfig ):
    """ONNX export configuration: declares the dynamic input axes."""

    @property
    def lowerCAmelCase__ ( self ):
        """Return the ONNX dynamic-axes mapping for the model inputs.

        Multiple-choice tasks carry an extra ``choice`` axis between batch and
        sequence.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ]
        )
97
0
"""Project Euler 144: count laser reflections inside the ellipse 4x^2 + y^2 = 100."""
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    """Reflect the beam at (point_x, point_y) and return the next hit point.

    Returns ``(next_x, next_y, outgoing_gradient)``. The original file defined
    this under an obfuscated name while the caller used ``next_point``
    (NameError) — the call-site name is restored.
    """
    # Gradient of the ellipse normal at the impact point (for 4x^2 + y^2 = 100).
    normal_gradient = point_y / 4 / point_x
    # sin/cos of twice the normal angle, used to mirror the incoming gradient.
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    #   y^2 + 4x^2 = 100
    #   y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # Two roots; one is the current point — take the other. The original
    # compared two copies of the same undefined name here.
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """Count reflections until the beam exits through the top gap.

    The gap is the arc with -0.01 <= x <= 0.01 and y > 0. The beam enters at
    (0.0, 10.1) and first strikes at ``(first_x_coord, first_y_coord)``.
    """
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    gradient = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(F'{solution() = }')
282
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class lowercase ( unittest.TestCase ): """simple docstring""" def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=3 , UpperCamelCase_=30 , UpperCamelCase_=400 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=True , UpperCamelCase_=1 / 255 , UpperCamelCase_=True , ): '''simple docstring''' UpperCamelCase__ :Dict = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333} UpperCamelCase__ :str = parent UpperCamelCase__ :List[Any] = batch_size UpperCamelCase__ :Dict = num_channels UpperCamelCase__ :str = min_resolution UpperCamelCase__ :Optional[Any] = max_resolution UpperCamelCase__ :int = do_resize UpperCamelCase__ :Optional[Any] = size UpperCamelCase__ :Tuple = do_normalize UpperCamelCase__ :List[Any] = image_mean UpperCamelCase__ :Dict = image_std UpperCamelCase__ :Union[str, Any] = do_rescale UpperCamelCase__ :Union[str, Any] = rescale_factor UpperCamelCase__ :Union[str, Any] = do_pad def lowerCAmelCase__ ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=False ): '''simple docstring''' if not batched: UpperCamelCase__ :List[str] = image_inputs[0] if 
isinstance(UpperCamelCase_ , Image.Image ): UpperCamelCase__ , UpperCamelCase__ :List[str] = image.size else: UpperCamelCase__ , UpperCamelCase__ :List[Any] = image.shape[1], image.shape[2] if w < h: UpperCamelCase__ :int = int(self.size['''shortest_edge'''] * h / w ) UpperCamelCase__ :Dict = self.size['''shortest_edge'''] elif w > h: UpperCamelCase__ :int = self.size['''shortest_edge'''] UpperCamelCase__ :Tuple = int(self.size['''shortest_edge'''] * w / h ) else: UpperCamelCase__ :str = self.size['''shortest_edge'''] UpperCamelCase__ :str = self.size['''shortest_edge'''] else: UpperCamelCase__ :Any = [] for image in image_inputs: UpperCamelCase__ , UpperCamelCase__ :Dict = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCamelCase__ :List[Any] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[0] )[0] UpperCamelCase__ :Optional[int] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowercase ( A__ , unittest.TestCase ): """simple docstring""" _a = ConditionalDetrImageProcessor if is_vision_available() else None def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessingTester(self ) @property def lowerCAmelCase__ ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Optional[int] = 
self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} ) self.assertEqual(image_processor.do_pad , UpperCamelCase_ ) UpperCamelCase__ :List[str] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase_ ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , UpperCamelCase_ ) def lowerCAmelCase__ ( self ): '''simple docstring''' pass def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase__ :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image ) # Test not batched input UpperCamelCase__ :Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ ) UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , 
equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , np.ndarray ) # Test not batched input UpperCamelCase__ :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCamelCase__ , UpperCamelCase__ :List[Any] = self.image_processor_tester.get_expected_values(UpperCamelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase__ :Union[str, Any] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , torch.Tensor ) # Test not batched input UpperCamelCase__ :str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCamelCase__ , UpperCamelCase__ :Dict = self.image_processor_tester.get_expected_values(UpperCamelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ ) self.assertEqual( encoded_images.shape , ( 
self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: UpperCamelCase__ :Optional[int] = json.loads(f.read() ) UpperCamelCase__ :Any = {'''image_id''': 39769, '''annotations''': target} # encode them UpperCamelCase__ :str = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' ) UpperCamelCase__ :List[Any] = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , return_tensors='''pt''' ) # verify pixel values UpperCamelCase__ :List[str] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ ) UpperCamelCase__ :str = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) ) # verify area UpperCamelCase__ :str = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) ) # verify boxes UpperCamelCase__ :Optional[Any] = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ ) UpperCamelCase__ :Optional[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) ) # verify image_id UpperCamelCase__ :List[Any] = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) ) # verify is_crowd UpperCamelCase__ :int = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) ) # verify 
class_labels UpperCamelCase__ :List[str] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) ) # verify orig_size UpperCamelCase__ :Tuple = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) ) # verify size UpperCamelCase__ :Union[str, Any] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) ) @slow def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: UpperCamelCase__ :Tuple = json.loads(f.read() ) UpperCamelCase__ :List[str] = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target} UpperCamelCase__ :Any = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessor(format='''coco_panoptic''' ) UpperCamelCase__ :Dict = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , masks_path=UpperCamelCase_ , return_tensors='''pt''' ) # verify pixel values UpperCamelCase__ :str = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ ) UpperCamelCase__ :Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) ) # verify area UpperCamelCase__ :Tuple = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) ) # verify boxes UpperCamelCase__ :Any = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ ) 
UpperCamelCase__ :List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) ) # verify image_id UpperCamelCase__ :List[str] = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) ) # verify is_crowd UpperCamelCase__ :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) ) # verify class_labels UpperCamelCase__ :str = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) ) # verify masks UpperCamelCase__ :Optional[Any] = 822873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCamelCase_ ) # verify orig_size UpperCamelCase__ :List[str] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) ) # verify size UpperCamelCase__ :List[Any] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) )
97
0
"""Convert Table Transformer checkpoints (PubTables-1M) to the Hugging Face format.

The original checkpoints use DETR-style parameter names; this script renames them,
splits the fused q/k/v projections, verifies the converted model on an example
image, and optionally saves / pushes the result.
"""

import argparse
from collections import OrderedDict
from pathlib import Path

import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F

from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
        ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
    ]
)


def rename_key(state_dict, old, new):
    """Move `state_dict[old]` to `state_dict[new]` in place."""
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    """Return a copy of `state_dict` with backbone keys renamed to the HF convention."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict):
    """Split each fused in-projection matrix/bias into separate q/k/v projections (in place)."""
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]

    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def resize(image, checkpoint_url):
    """Resize a PIL image so its longest side is 800 (detection) or 1000 (structure)."""
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    """Convert a PIL image to a tensor and apply ImageNet mean/std normalization."""
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image


@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Download the original checkpoint, convert it, verify outputs and optionally save/push.

    Args:
        checkpoint_url: URL of the original PubTables-1M checkpoint.
        pytorch_dump_folder_path: folder to save the converted model/processor in, or None.
        push_to_hub: whether to push the converted model to the Hugging Face hub.
    """
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        idalabel = {0: "table", 1: "table rotated"}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        idalabel = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
189
"""Count the ways to assign N distinct tasks to M persons via bitmask DP."""
from collections import defaultdict


class AssignmentUsingBitmask:
    """Bitmask dynamic programming over persons: dp[mask][task_no] is the number
    of ways to assign tasks `task_no..N` so that exactly the persons in `mask`
    already hold a task and everyone ends up with one."""

    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [[-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        """Return the number of complete assignments reachable from state (mask, task_no)."""
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        """Record which persons can perform each task, then run the DP from task 1."""
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
97
0
"""Pearson correlation coefficient metric for the 🤗 `datasets` library."""
from scipy.stats import pearsonr

import datasets


_DESCRIPTION = "\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n"

_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results['pearsonr'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        ['p-value', 'pearsonr']\n        >>> print(round(results['pearsonr'], 2))\n        -0.74\n        >>> print(round(results['p-value'], 2))\n        0.15\n"

_CITATION = "\n@article{2020SciPy-NMeth,\nauthor  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n            Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n            Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n            Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n            Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n            Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n            Kern, Robert and Larson, Eric and Carey, C J and\n            Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n            {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n            Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n            Harris, Charles R. and Archibald, Anne M. and\n            Ribeiro, Antonio H. and Pedregosa, Fabian and\n            {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n            Computing in Python}},\njournal = {Nature Methods},\nyear    = {2020},\nvolume  = {17},\npages   = {261--272},\nadsurl  = {https://rdcu.be/b08Wh},\ndoi     = {10.1038/s41592-019-0686-2},\n}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    """Metric wrapper around `scipy.stats.pearsonr`."""

    def _info(self):
        # Declares the metric's signature: float predictions vs. float references.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        # scipy returns (statistic, pvalue); only expose the p-value on request.
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
139
"""Download a user's full timeline with tweepy and dump it to a CSV file."""
import csv

import tweepy

# Twitter API credentials (fill in before running)
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    """Fetch as many of `screen_name`'s tweets as the API allows (~3200) and
    write them to `new_<screen_name>_tweets.csv` (columns: id, created_at, text)."""
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
97
0
"""Unit tests for `DDPMScheduler`, built on the shared scheduler test harness."""
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMSchedulerTest(SchedulerCommonTest):
    """Exercises DDPM-specific configuration, variance math, full denoising loops
    and custom-timestep handling via `SchedulerCommonTest`'s helpers."""

    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return the default DDPM config, with any keyword overrides applied."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # fixed_small variance at the boundaries and in the middle of the schedule
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        # previous_timestep must walk the custom list and end at -1
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
103
"""Convert Table Transformer (DETR-style) checkpoints from the original repository to HuggingFace format.

Fixes the mangled identifiers of the original dump: every function was named ``a`` with
duplicate ``__a`` parameters (a SyntaxError) and all assignment targets had been replaced by
throwaway locals. Real names are recovered from the surviving call sites
(``rename_key``, ``read_in_q_k_v``, ``convert_table_transformer_checkpoint`` in ``__main__``).
"""

import argparse
from collections import OrderedDict
from pathlib import Path

import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F

from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
        ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
    ]
)


def rename_key(state_dict, old, new):
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place."""
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    """Return a copy of ``state_dict`` with backbone keys renamed to the HF convention."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


def read_in_q_k_v(state_dict):
    """Split each fused in_proj matrix/bias into separate q/k/v projections (in place).

    The original checkpoint stores PyTorch's ``MultiHeadAttention`` fused QKV weights; HF
    models use three separate linear layers of hidden size 256 each.
    """
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def resize(image, checkpoint_url):
    """Resize a PIL image so its longest side is 800 (detection) or 1000 (structure) pixels."""
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


def normalize(image):
    """Convert a PIL image to a tensor and apply ImageNet mean/std normalization."""
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image


@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Download the original checkpoint, convert it to HF format, verify outputs, and save/push.

    Args:
        checkpoint_url: URL of the original PubTables-1M checkpoint (.pth).
        pytorch_dump_folder_path: optional local folder to save the converted model to.
        push_to_hub: whether to push the converted model to the HF hub.
    """
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an example document image
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
97
0
'''simple docstring'''


class a__:
    """Disjoint-set (union-find) over pre-weighted sets, tracking the largest merged set.

    ``set_counts[i]`` is the initial weight of set ``i``; ``merge`` does union by rank with
    path compression in ``get_parent``, and ``max_set`` always holds the weight of the
    heaviest set seen so far.

    Note: the original dump had every ``self.<attr> = ...`` assignment mangled into a dead
    local, and both methods shared one mangled name; attribute and method names are restored
    from their read sites (``self.set_counts``, ``self.ranks``, ``self.parents``,
    ``self.max_set``, ``self.get_parent``).
    """

    def __init__(self, set_counts):
        # per-root accumulated weight of each set
        self.set_counts = set_counts
        # weight of the heaviest set so far
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        # union-by-rank bookkeeping
        self.ranks = [1] * num_sets
        # each element starts as its own parent
        self.parents = list(range(num_sets))

    def merge(self, src, dst):
        """Union the sets containing ``src`` and ``dst``.

        Returns False if they are already in the same set, True otherwise.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            # attach src under dst: move weight, re-parent, bump rank on tie
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            # attach dst under src
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set):
        """Find the root of ``disj_set``, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        # path compression: point directly at the root
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
250
'''simple docstring''' from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def a ( __a ) -> bool: '''simple docstring''' UpperCamelCase__ :int = int(number**0.5 ) return number == sq * sq def a ( __a , __a , __a , __a , __a , __a ) -> tuple[int, int]: '''simple docstring''' UpperCamelCase__ :int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den UpperCamelCase__ :int = x_den * y_den * z_den UpperCamelCase__ :int = gcd(__a , __a ) top //= hcf bottom //= hcf return top, bottom def a ( __a = 35 ) -> int: '''simple docstring''' UpperCamelCase__ :set = set() UpperCamelCase__ :int UpperCamelCase__ :Fraction = Fraction(0 ) UpperCamelCase__ :tuple[int, int] for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 UpperCamelCase__ :int = x_num * y_den + x_den * y_num UpperCamelCase__ :Any = x_den * y_den UpperCamelCase__ :Tuple = gcd(__a , __a ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: UpperCamelCase__ :Tuple = add_three( __a , __a , __a , __a , __a , __a ) unique_s.add(__a ) # n=2 UpperCamelCase__ :List[str] = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) UpperCamelCase__ :Dict = x_den * x_den * y_den * y_den if is_sq(__a ) and is_sq(__a ): UpperCamelCase__ :Any = int(sqrt(__a ) ) UpperCamelCase__ :Optional[int] = int(sqrt(__a ) ) UpperCamelCase__ :int = gcd(__a , __a ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: UpperCamelCase__ :Tuple = add_three( __a , __a , __a , __a , __a , __a ) unique_s.add(__a ) # n=-1 UpperCamelCase__ :Tuple = x_num * y_num UpperCamelCase__ :Union[str, Any] = x_den * y_num + x_num * y_den UpperCamelCase__ :List[str] = gcd(__a , __a ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: UpperCamelCase__ :Union[str, Any] = add_three( __a , __a , __a , __a , __a , __a ) unique_s.add(__a ) # n=2 UpperCamelCase__ 
:Optional[Any] = x_num * x_num * y_num * y_num UpperCamelCase__ :Tuple = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(__a ) and is_sq(__a ): UpperCamelCase__ :str = int(sqrt(__a ) ) UpperCamelCase__ :Any = int(sqrt(__a ) ) UpperCamelCase__ :Dict = gcd(__a , __a ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: UpperCamelCase__ :int = add_three( __a , __a , __a , __a , __a , __a ) unique_s.add(__a ) for num, den in unique_s: total += Fraction(__a , __a ) return total.denominator + total.numerator if __name__ == "__main__": print(F"""{solution() = }""")
97
0
"""Lazy-import shim for the Wav2Vec2Phoneme tokenizer submodule.

Restores the mangled module: the import structure dict must be bound to
``_import_structure`` (it is referenced by that name in the ``_LazyModule`` call below),
and the lazy module must be installed via ``sys.modules[__name__]`` — the original dump
assigned both to a throwaway name. The submodule/class names in the TYPE_CHECKING branch
are taken from the authoritative runtime strings in ``_import_structure``.
"""
from typing import TYPE_CHECKING

from ...utils import _LazyModule

# submodule name -> public symbols it exports; consumed lazily by _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    # replace this module with a lazy proxy that imports submodules on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
255
'''simple docstring'''


def solution() -> int:
    """Project Euler 40: product of the digits d1 * d10 * d100 * ... * d1000000 of
    Champernowne's constant 0.123456789101112...

    Fixes the mangled original, which appended ``str(__a)`` (a NameError — the counter was
    ``i``) and called an undefined ``solution`` in ``__main__``. Also stops as soon as one
    million *digits* have been accumulated instead of one million *numbers* — the original
    built ~6.9M digits; the first million are identical, so the result is unchanged.
    """
    digits: list[str] = []
    total_len = 0
    i = 1
    while total_len < 1_000_000:
        piece = str(i)
        digits.append(piece)
        total_len += len(piece)
        i += 1
    constant = "".join(digits)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
97
0
"""Audio diffusion pipeline (spectrogram-based audio generation).

Restores the mangled module: the base class was the undefined ``A__`` (restored to
``DiffusionPipeline``, the only imported pipeline base), ``__call__`` had fourteen
parameters all named ``a_`` (a SyntaxError), and the three public methods shared one
mangled name. Method names ``get_default_steps`` (called internally), ``encode`` and
``slerp`` and the ``__call__`` parameter names follow the diffusers AudioDiffusionPipeline
this module mirrors — NOTE(review): confirm against the canonical source.
"""
from math import acos, sin
from typing import List, Tuple, Union

import numpy as np
import torch
from PIL import Image

from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel


class _UpperCAmelCase(DiffusionPipeline):
    """Pipeline that denoises mel-spectrogram images and converts them back to audio."""

    # the VQ-VAE (latent) stage is optional; assumed to match DiffusionPipeline's
    # optional-components convention — TODO confirm attribute name
    _optional_components = ["vqvae"]

    def __init__(self, unet, scheduler, mel, vqvae):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self):
        """Default number of inference steps: 50 for DDIM, 1000 otherwise."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        audio_file=None,
        raw_audio=None,
        slice=0,
        start_step=0,
        steps=None,
        generator=None,
        mask_start_secs=0,
        mask_end_secs=0,
        step_generator=None,
        eta=0,
        noise=None,
        encoding=None,
        return_dict=True,
    ):
        """Generate audio by denoising a (optionally partially masked) spectrogram.

        Returns ``(images, (sample_rate, audios))`` when ``return_dict`` is False,
        otherwise a combined audio+image pipeline output.
        """
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility: promote an int sample_size to a (h, w) pair
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            # seed the diffusion from an existing audio slice's spectrogram
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            # scale uint8 pixels to [-1, 1]
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(
                    input_images, noise, self.scheduler.timesteps[start_step - 1]
                )

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(
                input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:])
            )

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                # keep the masked seconds pinned to the (noised) input spectrogram
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images, steps=50):
        """Run DDIM inversion: map spectrogram images back to the noise that generates them."""
        # Only works with DDIM (deterministic) sampling
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            # invert one DDIM step
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t**0.5 + beta_prod_t**0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0, x1, alpha):
        """Spherical linear interpolation between two flattened tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
102
'''simple docstring'''
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Return a copy of ``img`` with ``level`` added to every channel value.

    Fixes the mangled original, whose outer function had two parameters both named ``__a``
    (a SyntaxError) and whose inner helper declared ``__a`` but read ``c``. Real names come
    from the ``change_brightness(img, 100)`` call in ``__main__``.

    Args:
        img: source PIL image.
        level: brightness offset in [-255.0, 255.0]; negative darkens, positive brightens.

    Raises:
        ValueError: if ``level`` is outside [-255.0, 255.0].
    """

    def brightness(c: int) -> float:
        # 128 is the mid-point; the expression simplifies to c + level but keeps the
        # original's explicit form around the mid-gray anchor
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
97
0
"""Multi-GPU launcher tests for accelerate, plus the pad_across_processes worker script.

Restores the mangled module: all five test methods shared one mangled name, and every
module-level variable in the worker section was named ``A__`` while being read under its
real name (``tensor``, ``error_msg``, ``accelerator``, ...). Method names chosen to
describe the script each one launches — NOTE(review): confirm against the canonical
accelerate test file.
"""
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment


class lowercase__(unittest.TestCase):
    def setUp(self):
        # locate the helper scripts shipped inside accelerate.test_utils
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        # launches THIS file as the distributed worker (see __main__ below)
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    # Worker body for test_pad_across_processes: each rank builds a tensor of a
    # rank-dependent size and checks pad_across_processes pads it correctly.
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
144
'''simple docstring'''
from datetime import datetime as dt
import os

from github import Github

# Issues carrying any of these labels are never auto-closed or marked stale.
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    """Close or mark stale inactive issues on huggingface/transformers.

    Fixes the mangled original: the sort used ``lambda __a: i.created_at`` and
    ``reverse=__a`` (NameErrors — restored to ``lambda i: i.created_at`` /
    ``reverse=True``), the exempt-label list had lost its ``LABELS_TO_EXEMPT`` name
    (referenced below), and ``__main__`` called the undefined ``main``.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # newest comment first
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
97
0
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging A__: str = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : int ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : List[Any]=False ) -> Dict: try: import torch # noqa: F401 except ImportError: logger.error( """Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see""" """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation""" """ instructions.""" ) raise if not is_sharded: _a : Optional[Any] =os.path.abspath(__a ) logger.info(F"Loading PyTorch weights from {pt_path}" ) _a : Tuple =torch.load(__a ,map_location="""cpu""" ) logger.info(F"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters." 
) _a : List[Any] =convert_pytorch_state_dict_to_flax(__a ,__a ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files _a : Union[str, Any] =convert_pytorch_sharded_state_dict_to_flax(__a ,__a ) return flax_state_dict def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[int] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Union[str, Any] ,) -> (Tuple[str], np.ndarray): def is_key_or_prefix_key_in_dict(_UpperCAmelCase : List[Any] ) -> bool: return len(set(__a ) & {key, (model_prefix,) + key} ) > 0 # layer norm _a : Dict =pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__a ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean _a : Dict =pt_tuple_key[:-1] + ('''mean''',) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__a ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var _a : Optional[Any] =pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__a ): return renamed_pt_tuple_key, pt_tensor # embedding _a : Optional[int] =pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__a ): return renamed_pt_tuple_key, pt_tensor # conv layer _a : Tuple =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__a ): _a : int =pt_tensor.transpose(2 ,3 ,1 ,0 ) return renamed_pt_tuple_key, pt_tensor # linear layer _a : Tuple =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__a ): _a : str =pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight _a : List[str] =pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias _a : List[Any] =pt_tuple_key[:-1] + 
('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 _a : Tuple =None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): _a : Tuple =pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): _a : Union[str, Any] =pt_tuple_key[-2] + '''_v''' if name is not None: _a : Tuple =pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : Any ) -> Tuple: _a : Optional[Any] ={k: v.numpy() for k, v in pt_state_dict.items()} _a : List[str] =flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: _a : Optional[Any] =flax_model.params['''params'''] else: _a : Optional[Any] =flax_model.params _a : Union[str, Any] =flatten_dict(__a ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: _a : List[str] =flatten_dict(flax_model.params["""batch_stats"""] ) random_flax_state_dict.update(__a ) _a : Any ={} _a : Optional[int] =(model_prefix not in flax_model_params) and ( model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()} ) _a : Union[str, Any] =(model_prefix in flax_model_params) and ( model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): _a : int =tuple(pt_key.split(""".""" ) ) # remove base model prefix if necessary _a : Union[str, Any] =pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: _a : List[str] =pt_tuple_key[1:] # Correctly rename weight parameters _a : int =rename_key_and_reshape_tensor( __a ,__a ,__a ,__a ) # add model prefix if necessary _a : int =(model_prefix,) + flax_key in random_flax_state_dict if 
load_base_model_into_model_with_head and require_base_model_prefix: _a : Optional[Any] =(model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: _a : Union[str, Any] =jnp.asarray(__a ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__a ,__a ) continue # also add unexpected weight so that warning is thrown _a : Tuple =jnp.asarray(__a ) else: # also add unexpected weight so that warning is thrown _a : Union[str, Any] =jnp.asarray(__a ) return unflatten_dict(__a ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : Dict ) -> List[str]: import torch # Load the index _a : int ={} for shard_file in shard_filenames: # load using msgpack utils _a : List[Any] =torch.load(__a ) _a : List[Any] ={k: v.numpy() for k, v in pt_state_dict.items()} _a : List[Any] =flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: _a : int =flax_model.params['''params'''] _a : Union[str, Any] =flatten_dict(__a ) random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) ) else: _a : Dict =flax_model.params _a : int =flatten_dict(__a ) _a : List[str] =(model_prefix not in flax_model_params) and ( model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()} ) _a : int =(model_prefix in flax_model_params) and ( model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in 
pt_state_dict.items(): _a : str =tuple(pt_key.split(""".""" ) ) # remove base model prefix if necessary _a : Dict =pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: _a : Dict =pt_tuple_key[1:] # Correctly rename weight parameters _a : Optional[Any] =rename_key_and_reshape_tensor( __a ,__a ,__a ,__a ) # add model prefix if necessary _a : Any =(model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: _a : List[str] =(model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: _a : List[Any] =jnp.asarray(__a ) continue if "var" in flax_key[-1]: _a : List[Any] =jnp.asarray(__a ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__a ,__a ) continue # also add unexpected weight so that warning is thrown _a : str =jnp.asarray(__a ) else: # also add unexpected weight so that warning is thrown _a : List[Any] =jnp.asarray(__a ) return unflatten_dict(__a ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : int ) -> Tuple: _a : Tuple =os.path.abspath(__a ) logger.info(F"Loading Flax weights from {flax_checkpoint_path}" ) # import correct flax class _a : str =getattr(__a ,"""Flax""" + model.__class__.__name__ ) # load flax weight dict with open(__a ,"""rb""" ) as state_f: try: _a : Tuple =from_bytes(__a ,state_f.read() ) except UnpicklingError: raise EnvironmentError(F"Unable to convert {flax_checkpoint_path} to Flax deserializable object. 
" ) return load_flax_weights_in_pytorch_model(__a ,__a ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Dict ,_UpperCAmelCase : List[Any] ) -> int: try: import torch # noqa: F401 except ImportError: logger.error( """Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see""" """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation""" """ instructions.""" ) raise # check if we have bf16 weights _a : List[Any] =flatten_dict(jax.tree_util.tree_map(lambda _UpperCAmelCase : x.dtype == jnp.bfloataa ,__a ) ).values() if any(__a ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """ """before loading those in PyTorch model.""" ) _a : str =jax.tree_util.tree_map( lambda _UpperCAmelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params ,__a ) _a : Optional[Any] =flatten_dict(__a ) _a : str =pt_model.state_dict() _a : List[Any] =(pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()} ) _a : Optional[int] =(pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys _a : Optional[Any] =[] _a : str =set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): _a : Tuple =flax_key_tuple[0] == pt_model.base_model_prefix _a : int ='''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: _a : str =flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: _a : List[str] 
=(pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__a ) not in pt_model_dict: # conv layer _a : Any =flax_key_tuple[:-1] + ('''weight''',) _a : Tuple =jnp.transpose(__a ,(3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__a ) not in pt_model_dict: # linear layer _a : Dict =flax_key_tuple[:-1] + ('''weight''',) _a : Tuple =flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: _a : Union[str, Any] =flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: _a : Optional[Any] =flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: _a : List[Any] =flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: _a : Optional[int] ='''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: _a : Any ='''.'''.join(__a ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. _a : Dict ={} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: _a : Dict =key.split(""".""" ) _a : Union[str, Any] =None if key_components[-3::2] == ["parametrizations", "original0"]: _a : Union[str, Any] =key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: _a : List[Any] =key_components[-2] + '''_v''' if name is not None: _a : Dict =key_components[:-3] + [name] _a : Optional[int] ='''.'''.join(__a ) _a : Any =key if flax_key in special_pt_names: _a : Tuple =special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected " F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." 
) else: # add weight to pytorch dict _a : Dict =np.asarray(__a ) if not isinstance(__a ,np.ndarray ) else flax_tensor _a : Optional[int] =torch.from_numpy(__a ) # remove from missing keys missing_keys.remove(__a ) else: # weight is not expected by PyTorch model unexpected_keys.append(__a ) pt_model.load_state_dict(__a ) # re-transform missing_keys to list _a : List[str] =list(__a ) if len(__a ) > 0: logger.warning( """Some weights of the Flax model were not used when initializing the PyTorch model""" F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing" F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture" """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This""" F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect" """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a""" """ FlaxBertForSequenceClassification model).""" ) else: logger.warning(F"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n" ) if len(__a ) > 0: logger.warning( F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly" F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to" """ use it for predictions and inference.""" ) else: logger.warning( F"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n" """If your task is similar to the task the model of the checkpoint was trained on, """ F"you can already use {pt_model.__class__.__name__} for predictions without further training." ) return pt_model
276
"""Re-split Pegasus-style summaries into one sentence per line using nltk."""
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    # Serialize the download across concurrent workers so that only one
    # process fetches the punkt tokenizer data at a time.
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def a(__a) -> str:
    """Return *__a* with the Pegasus ``<n>`` sentinel removed and real
    newlines inserted between sentences (via nltk's punkt tokenizer).

    Raises:
        AssertionError: if nltk is not installed.
    """
    # BUG FIX: ``re.sub`` returns a new string; the original discarded the
    # result, so the "<n>" sentinel was never actually removed.
    __a = re.sub("<n>", "", __a)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(__a))
97
0
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType


UpperCAmelCase_ = logging.get_logger(__name__)

# NOTE(review): this rebinding shadows the logger above; the two module-level
# names were distinct in the upstream file.  Kept as-is for interface safety.
UpperCAmelCase_ = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class lowerCamelCase__(A__):
    """Configuration for DeBERTa-v2 models.

    Stores the hyper-parameters that define the model architecture; all
    arguments are optional and default to the ``deberta-v2-xlarge`` layout.
    """

    # Identifier consumed by the auto-class machinery.
    UpperCAmelCase__: List[Any] = "deberta-v2"

    # BUG FIX: in the original every parameter was named ``UpperCamelCase_``,
    # which is a SyntaxError (duplicate argument names).  The real parameter
    # names were recovered from the attribute assignments in the body.
    def __init__(
        self,
        vocab_size=128_100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility: older configs stored pos_att_type as a
        # "|"-separated string instead of a list.
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


# NOTE(review): this class shadows the config class above because the
# obfuscation gave both the same name; upstream they were distinct
# (DebertaV2Config / DebertaV2OnnxConfig).  Kept as-is for interface safety.
class lowerCamelCase__(A__):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis description of the ONNX graph inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        # Minimum opset known to support the operators this model exports.
        return 12

    # BUG FIX: the three methods of this class were all named
    # ``lowerCAmelCase__`` and silently shadowed one another; the real names
    # were restored from the upstream ONNX-config interface.
    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size=-1,
        seq_length=-1,
        num_choices=-1,
        is_pair=False,
        framework=None,
        num_channels=3,
        image_width=40,
        image_height=40,
        tokenizer=None,
    ):
        """Build dummy inputs for export, dropping ``token_type_ids`` when the
        config declares no token types."""
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs

    # Backward-compat alias: the only surviving obfuscated name pointed here.
    lowerCAmelCase__ = generate_dummy_inputs
12
"""Download a WMT translation dataset and dump it as line-aligned files."""
from pathlib import Path

import fire
from tqdm import tqdm


# BUG FIX: the original declared every parameter as ``__a`` (a SyntaxError)
# and was named ``a`` while ``fire.Fire`` below referenced
# ``download_wmt_dataset``; real names recovered from the body.
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download *dataset* for the *src_lang*-*tgt_lang* pair and write one
    ``<split>.source`` / ``<split>.target`` file pair per split, one example
    per line (the ``validation`` split is written as ``val``).

    Raises:
        ImportError: if the ``datasets`` package is not installed.
    """
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")

    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)

    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")

        # BUG FIX: the original opened both files and never closed them; a
        # context manager guarantees the buffers are flushed to disk.
        with src_path.open("w+") as src_fp, tgt_path.open("w+") as tgt_fp:
            # reader is the bottleneck so writing one record at a time
            # doesn't slow things down
            for x in tqdm(ds[split]):
                ex = x["translation"]
                src_fp.write(ex[src_lang] + "\n")
                tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")


if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
97
0
"""Convert a fairseq Speech2Text checkpoint to the Transformers format."""
import argparse

import torch
from torch import nn

from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration


# BUG FIX: all four functions in this file were named ``UpperCAmelCase__`` and
# shadowed one another while the driver called the real names; names restored.
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping entries from *state_dict* in place."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        # pop with a default so already-absent keys are tolerated
        state_dict.pop(k, None)


def rename_keys(s_dict):
    """Rename fairseq layer prefixes to the Transformers naming, in place.

    BUG FIX: the original popped each key but discarded the assignment
    target, deleting the weights instead of re-inserting them renamed.
    """
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    """Build a bias-free ``nn.Linear`` sharing *emb*'s weight (tied lm_head)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Load a fairseq ``.pt`` checkpoint, map it onto a
    ``SpeechaTextForConditionalGeneration`` model, and save it to
    *pytorch_dump_folder_path*.

    Raises:
        ValueError: if unexpected weights are missing after loading.
    """
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"]
    state_dict = mam_aaa["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    # BUG FIX: the comprehension converted the wrong variable (``int(__a)``
    # instead of ``int(i)``).
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = SpeechaTextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=2_00,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = SpeechaTextForConditionalGeneration(config)
    # Positional-embedding buffers are recomputed, so they may be "missing".
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    lowerCamelCase = argparse.ArgumentParser()
    # Required parameters
    lowerCamelCase.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    lowerCamelCase.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    lowerCamelCase = lowerCamelCase.parse_args()
    convert_fairseq_sat_checkpoint_to_tfms(lowerCamelCase.fairseq_path, lowerCamelCase.pytorch_dump_folder_path)
188
"""Lazy import structure for the DPT model family."""
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


# BUG FIX: the original bound this dict (and every optional submodule list)
# to the same throwaway name, so ``_import_structure`` below raised a
# NameError and the optional entries overwrote each other instead of being
# added as keys.
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

# Vision-only submodules are registered only when the dependency is present.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

# Torch-only modeling submodule.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
97
0
"""Lazy import structure for the BigBird-Pegasus model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# BUG FIX: the original rebound this dict and the torch-only list to the same
# throwaway name, so ``_import_structure`` below raised a NameError and the
# modeling entries never became a key of the structure.
_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

# Torch-only modeling submodule is registered only when torch is present.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
282
"""Hamming distance between two equal-length strings."""


def a(string1: str, string2: str) -> int:
    """Return the number of positions at which the two strings differ.

    >>> a("python", "pithon")
    1
    >>> a("karolin", "kathrin")
    3
    >>> a("", "")
    0

    Raises:
        ValueError: if the strings have different lengths.
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    # BUG FIX: the original bound both halves of each pair to the SAME loop
    # variable, so the mismatch test compared a character with itself and the
    # function always returned 0.
    return sum(1 for char1, char2 in zip(string1, string2) if char1 != char2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
97
0
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig lowerCamelCase : Optional[Any] =logging.get_logger(__name__) # General docstring lowerCamelCase : Optional[Any] ='''RegNetConfig''' # Base docstring lowerCamelCase : Any ='''facebook/regnet-y-040''' lowerCamelCase : Dict =[1, 1088, 7, 7] # Image classification docstring lowerCamelCase : Dict ='''facebook/regnet-y-040''' lowerCamelCase : Union[str, Any] ='''tabby, tabby cat''' lowerCamelCase : Union[str, Any] =[ '''facebook/regnet-y-040''', # See all regnet models at https://huggingface.co/models?filter=regnet ] class __a ( tf.keras.layers.Layer ): def __init__( self : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] = 3 , SCREAMING_SNAKE_CASE : Optional[int] = 1 , SCREAMING_SNAKE_CASE : str = 1 , SCREAMING_SNAKE_CASE : Any = "relu" , **SCREAMING_SNAKE_CASE : Optional[Any] , ): '''simple docstring''' super().__init__(**UpperCamelCase_ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb UpperCamelCase__ : List[str] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) UpperCamelCase__ : Dict = tf.keras.layers.ConvaD( filters=UpperCamelCase_ , kernel_size=UpperCamelCase_ , strides=UpperCamelCase_ , padding="VALID" , groups=UpperCamelCase_ , use_bias=UpperCamelCase_ , name="convolution" , ) UpperCamelCase__ : Tuple = tf.keras.layers.BatchNormalization(epsilon=1e-5 , 
momentum=0.9 , name="normalization" ) UpperCamelCase__ : Dict = ACTaFN[activation] if activation is not None else tf.identity def __lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : str ): '''simple docstring''' UpperCamelCase__ : Optional[Any] = self.convolution(self.padding(UpperCamelCase_ ) ) UpperCamelCase__ : int = self.normalization(UpperCamelCase_ ) UpperCamelCase__ : Tuple = self.activation(UpperCamelCase_ ) return hidden_state class __a ( tf.keras.layers.Layer ): def __init__( self : Dict , SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' super().__init__(**UpperCamelCase_ ) UpperCamelCase__ : Dict = config.num_channels UpperCamelCase__ : Union[str, Any] = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , ) def __lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' UpperCamelCase__ : Optional[Any] = shape_list(UpperCamelCase_ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) UpperCamelCase__ : int = tf.transpose(UpperCamelCase_ , perm=(0, 2, 3, 1) ) UpperCamelCase__ : Dict = self.embedder(UpperCamelCase_ ) return hidden_state class __a ( tf.keras.layers.Layer ): def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any = 2 , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' super().__init__(**UpperCamelCase_ ) UpperCamelCase__ : Optional[Any] = tf.keras.layers.ConvaD( filters=UpperCamelCase_ , kernel_size=1 , strides=UpperCamelCase_ , use_bias=UpperCamelCase_ , name="convolution" ) UpperCamelCase__ : Optional[int] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" ) def __lowercase ( self : str , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] = False ): '''simple docstring''' return self.normalization(self.convolution(UpperCamelCase_ ) , training=UpperCamelCase_ ) class __a ( tf.keras.layers.Layer ): def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' super().__init__(**UpperCamelCase_ ) UpperCamelCase__ : Tuple = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCamelCase_ , name="pooler" ) UpperCamelCase__ : str = [ tf.keras.layers.ConvaD(filters=UpperCamelCase_ , kernel_size=1 , activation="relu" , name="attention.0" ), tf.keras.layers.ConvaD(filters=UpperCamelCase_ , kernel_size=1 , activation="sigmoid" , name="attention.2" ), ] def __lowercase ( self : int , SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' UpperCamelCase__ : Any = self.pooler(UpperCamelCase_ ) for layer_module in self.attention: UpperCamelCase__ : Tuple = layer_module(UpperCamelCase_ ) UpperCamelCase__ : Tuple = hidden_state * pooled return hidden_state class __a ( tf.keras.layers.Layer ): def __init__( self : int , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE 
: Optional[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] = 1 , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' super().__init__(**UpperCamelCase_ ) UpperCamelCase__ : Union[str, Any] = in_channels != out_channels or stride != 1 UpperCamelCase__ : Any = max(1 , out_channels // config.groups_width ) UpperCamelCase__ : Optional[Any] = ( TFRegNetShortCut(UpperCamelCase_ , stride=UpperCamelCase_ , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. UpperCamelCase__ : Optional[Any] = [ TFRegNetConvLayer(UpperCamelCase_ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( UpperCamelCase_ , stride=UpperCamelCase_ , groups=UpperCamelCase_ , activation=config.hidden_act , name="layer.1" ), TFRegNetConvLayer(UpperCamelCase_ , kernel_size=1 , activation=UpperCamelCase_ , name="layer.2" ), ] UpperCamelCase__ : List[Any] = ACTaFN[config.hidden_act] def __lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int ): '''simple docstring''' UpperCamelCase__ : Any = hidden_state for layer_module in self.layers: UpperCamelCase__ : int = layer_module(UpperCamelCase_ ) UpperCamelCase__ : Union[str, Any] = self.shortcut(UpperCamelCase_ ) hidden_state += residual UpperCamelCase__ : str = self.activation(UpperCamelCase_ ) return hidden_state class __a ( tf.keras.layers.Layer ): def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : int = 1 , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' super().__init__(**UpperCamelCase_ ) UpperCamelCase__ : Optional[int] = in_channels != out_channels or stride != 1 UpperCamelCase__ : int = max(1 , out_channels // config.groups_width ) UpperCamelCase__ : Tuple = ( TFRegNetShortCut(UpperCamelCase_ , stride=UpperCamelCase_ , name="shortcut" ) 
if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) UpperCamelCase__ : List[Any] = [ TFRegNetConvLayer(UpperCamelCase_ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( UpperCamelCase_ , stride=UpperCamelCase_ , groups=UpperCamelCase_ , activation=config.hidden_act , name="layer.1" ), TFRegNetSELayer(UpperCamelCase_ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ), TFRegNetConvLayer(UpperCamelCase_ , kernel_size=1 , activation=UpperCamelCase_ , name="layer.3" ), ] UpperCamelCase__ : int = ACTaFN[config.hidden_act] def __lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' UpperCamelCase__ : List[Any] = hidden_state for layer_module in self.layers: UpperCamelCase__ : Optional[Any] = layer_module(UpperCamelCase_ ) UpperCamelCase__ : Optional[Any] = self.shortcut(UpperCamelCase_ ) hidden_state += residual UpperCamelCase__ : Dict = self.activation(UpperCamelCase_ ) return hidden_state class __a ( tf.keras.layers.Layer ): def __init__( self : List[str] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any = 2 , SCREAMING_SNAKE_CASE : Dict = 2 , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' super().__init__(**UpperCamelCase_ ) UpperCamelCase__ : List[str] = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer UpperCamelCase__ : Optional[Any] = [ # downsampling is done in the first layer with stride of 2 layer(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , stride=UpperCamelCase_ , name="layers.0" ), *[layer(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , name=F'layers.{i+1}' ) for i in range(depth - 1 )], ] def __lowercase ( self : Any , SCREAMING_SNAKE_CASE : str ): '''simple docstring''' for layer_module in self.layers: UpperCamelCase__ : List[str] = layer_module(UpperCamelCase_ ) return hidden_state class __a ( 
tf.keras.layers.Layer ): def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' super().__init__(**UpperCamelCase_ ) UpperCamelCase__ : int = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( UpperCamelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) ) UpperCamelCase__ : int = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(UpperCamelCase_ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , depth=UpperCamelCase_ , name=F'stages.{i+1}' ) ) def __lowercase ( self : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple = False , SCREAMING_SNAKE_CASE : Union[str, Any] = True ): '''simple docstring''' UpperCamelCase__ : Any = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: UpperCamelCase__ : List[Any] = hidden_states + (hidden_state,) UpperCamelCase__ : Optional[Any] = stage_module(UpperCamelCase_ ) if output_hidden_states: UpperCamelCase__ : Dict = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=UpperCamelCase_ , hidden_states=UpperCamelCase_ ) @keras_serializable class __a ( tf.keras.layers.Layer ): _lowerCAmelCase : Union[str, Any] = RegNetConfig def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' super().__init__(**UpperCamelCase_ ) UpperCamelCase__ : Any = config UpperCamelCase__ : Dict = TFRegNetEmbeddings(UpperCamelCase_ , name="embedder" ) UpperCamelCase__ : Any = TFRegNetEncoder(UpperCamelCase_ 
, name="encoder" ) UpperCamelCase__ : Union[str, Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCamelCase_ , name="pooler" ) @unpack_inputs def __lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict = None , SCREAMING_SNAKE_CASE : Optional[Any] = None , SCREAMING_SNAKE_CASE : List[Any] = False , ): '''simple docstring''' UpperCamelCase__ : Optional[int] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCamelCase__ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict UpperCamelCase__ : List[str] = self.embedder(UpperCamelCase_ , training=UpperCamelCase_ ) UpperCamelCase__ : Optional[int] = self.encoder( UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , return_dict=UpperCamelCase_ , training=UpperCamelCase_ ) UpperCamelCase__ : Union[str, Any] = encoder_outputs[0] UpperCamelCase__ : str = self.pooler(UpperCamelCase_ ) # Change to NCHW output format have uniformity in the modules UpperCamelCase__ : Tuple = tf.transpose(UpperCamelCase_ , perm=(0, 3, 1, 2) ) UpperCamelCase__ : List[str] = tf.transpose(UpperCamelCase_ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: UpperCamelCase__ : Union[str, Any] = tuple([tf.transpose(UpperCamelCase_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=UpperCamelCase_ , pooler_output=UpperCamelCase_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class __a ( A__ ): _lowerCAmelCase : str = RegNetConfig _lowerCAmelCase : List[Any] = '''regnet''' _lowerCAmelCase : int = '''pixel_values''' @property def __lowercase ( self : Union[str, Any] ): '''simple docstring''' return {"pixel_values": tf.TensorSpec(shape=(None, 
self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )} lowerCamelCase : Any =R''' Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. ''' lowerCamelCase : Optional[int] =R''' Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
''' @add_start_docstrings( '''The bare RegNet model outputting raw features without any specific head on top.''' , A__ , ) class __a ( A__ ): def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' super().__init__(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ ) UpperCamelCase__ : str = TFRegNetMainLayer(UpperCamelCase_ , name="regnet" ) @unpack_inputs @add_start_docstrings_to_model_forward(UpperCamelCase_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase_ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __lowercase ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] = None , SCREAMING_SNAKE_CASE : Dict = None , SCREAMING_SNAKE_CASE : Tuple=False , ): '''simple docstring''' UpperCamelCase__ : str = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCamelCase__ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict UpperCamelCase__ : Tuple = self.regnet( pixel_values=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , return_dict=UpperCamelCase_ , training=UpperCamelCase_ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( '''\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ''' , A__ , ) class __a ( A__ , A__ ): def __init__( self : str , SCREAMING_SNAKE_CASE : List[Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' super().__init__(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ ) UpperCamelCase__ : List[str] = config.num_labels UpperCamelCase__ : Dict = TFRegNetMainLayer(UpperCamelCase_ , name="regnet" ) # classification head UpperCamelCase__ : str = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(UpperCamelCase_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __lowercase ( self : int , SCREAMING_SNAKE_CASE : Dict = None , SCREAMING_SNAKE_CASE : List[Any] = None , SCREAMING_SNAKE_CASE : Optional[Any] = None , SCREAMING_SNAKE_CASE : List[str] = None , SCREAMING_SNAKE_CASE : Tuple=False , ): '''simple docstring''' UpperCamelCase__ : Optional[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCamelCase__ : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict UpperCamelCase__ : List[str] = self.regnet( UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , return_dict=UpperCamelCase_ , training=UpperCamelCase_ ) UpperCamelCase__ : str = outputs.pooler_output if return_dict else outputs[1] UpperCamelCase__ : Tuple = self.classifier[0](UpperCamelCase_ ) UpperCamelCase__ : Optional[int] = self.classifier[1](UpperCamelCase_ ) UpperCamelCase__ : Dict = None if labels is None else self.hf_compute_loss(labels=UpperCamelCase_ , logits=UpperCamelCase_ ) if not return_dict: UpperCamelCase__ : Optional[int] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output 
return TFSequenceClassifierOutput(loss=UpperCamelCase_ , logits=UpperCamelCase_ , hidden_states=outputs.hidden_states )
189
"""Catalan numbers via dynamic programming, with an interactive driver."""


def a(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0) .. C(upper_limit) as a list.

    Uses the recurrence C(i) = sum_{j=0}^{i-1} C(j) * C(i - j - 1),
    filling a table bottom-up in O(n^2) time.

    Args:
        upper_limit: largest index of the sequence to compute (inclusive).

    Returns:
        List of length ``upper_limit + 1`` with ``catalan_list[i] == C(i)``.

    Raises:
        ValueError: if ``upper_limit`` is negative.

    >>> a(5)
    [1, 1, 2, 5, 14, 42]
    """
    if upper_limit < 0:
        raise ValueError('''Limit for the Catalan sequence must be ≥ 0''')

    # catalan_list[i] will hold C(i).
    catalan_list = [0] * (upper_limit + 1)

    # Base cases: C(0) = C(1) = 1.
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j) * C(i - j - 1)) for j in [0, i).
    # (The original mangled code iterated `range(__a)` — the function
    # argument — instead of `range(i)`, which is wrong for every i < n.)
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


# Backward-compatible alias: the interactive driver below refers to the
# function by its descriptive name, which the mangled original never defined.
catalan_numbers = a


if __name__ == "__main__":
    print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
    print('''\n*** Enter -1 at any time to quit ***''')
    print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
    try:
        while True:
            # Read the next limit; a non-numeric entry raises ValueError and
            # is handled as "quit" below.
            N = int(input().strip())
            if N < 0:
                print('''\n********* Goodbye!! ************''')
                break
            else:
                print(F"""The Catalan numbers from 0 through {N} are:""")
                print(catalan_numbers(N))
                print('''Try another upper limit for the sequence: ''', end='''''')
    except (NameError, ValueError):
        print('''\n********* Invalid input, goodbye! ************\n''')

    import doctest

    doctest.testmod()
97
0
"""Speech feature extractor: log-mel filter-bank features (Kaldi-compatible,
via torchaudio) with optional utterance-level cepstral mean/variance
normalization (CMVN), producing ``input_features`` and ``attention_mask``.

NOTE(review): identifiers in this chunk appear machine-mangled — the class is
``_snake_case``, its base ``A__``, and several distinct parameters share the
name ``SCREAMING_SNAKE_CASE__`` (duplicate parameter names are not valid
Python).  Method bodies reference descriptive names (``num_mel_bins``,
``waveform``, ``x``, ``raw_speech`` ...) that the mangled signatures no longer
bind, and all three helper methods share the name ``__UpperCamelCase`` (each
definition shadows the previous).  Restore the original names before running;
the code below is preserved byte-for-byte and only documented.
"""
from typing import List, Optional, Union

import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging

# Module-level logger (mangled name; the warning call below refers to
# `logger` — presumably this binding originally carried that name).
A_ = logging.get_logger(__name__)


class _snake_case ( A__ ):
    """Sequence feature extractor for speech models.

    Converts raw mono waveforms into padded batches of filter-bank features.
    """

    # Names of the model inputs this extractor produces.
    _A : Optional[int] = ['''input_features''', '''attention_mask''']

    def __init__( self : str ,SCREAMING_SNAKE_CASE__ : List[str]=80 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=16_000 ,SCREAMING_SNAKE_CASE__ : Dict=80 ,SCREAMING_SNAKE_CASE__ : int=0.0 ,SCREAMING_SNAKE_CASE__ : Tuple=True ,SCREAMING_SNAKE_CASE__ : Any=True ,SCREAMING_SNAKE_CASE__ : Tuple=True ,**SCREAMING_SNAKE_CASE__ : List[str] ,):
        """Configure the extractor.

        The defaults suggest: feature_size=80, sampling_rate=16_000,
        num_mel_bins=80, padding_value=0.0, plus three boolean CMVN switches
        — TODO confirm the original parameter order against upstream.
        """
        super().__init__(feature_size=UpperCamelCase_ ,sampling_rate=UpperCamelCase_ ,padding_value=UpperCamelCase_ ,**UpperCamelCase_ )
        # NOTE(review): the right-hand names below are not bound by the
        # mangled signature above — they were presumably the original
        # parameter names.
        SCREAMING_SNAKE_CASE:Union[str, Any] = num_mel_bins
        SCREAMING_SNAKE_CASE:int = do_ceptral_normalize
        SCREAMING_SNAKE_CASE:List[str] = normalize_means
        SCREAMING_SNAKE_CASE:Union[str, Any] = normalize_vars
        SCREAMING_SNAKE_CASE:Union[str, Any] = True

    def __UpperCamelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : str ,):
        """Extract Kaldi-compatible log-mel filter-bank features from one waveform.

        Returns a numpy array of shape (num_frames, num_mel_bins) — shape
        follows from ``ta_kaldi.fbank``; confirm against torchaudio docs.
        """
        # Kaldi compliance: scale to 16-bit signed integer range before fbank.
        SCREAMING_SNAKE_CASE:List[Any] = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        # Add a leading channel dimension expected by ta_kaldi.fbank.
        SCREAMING_SNAKE_CASE:Optional[int] = torch.from_numpy(UpperCamelCase_ ).unsqueeze(0 )
        SCREAMING_SNAKE_CASE:Tuple = ta_kaldi.fbank(UpperCamelCase_ ,num_mel_bins=self.num_mel_bins ,sample_frequency=self.sampling_rate )
        return features.numpy()

    @staticmethod
    def __UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : List[Any] = True ,SCREAMING_SNAKE_CASE__ : List[str] = True ,SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0.0 ,):
        """Apply utterance-level CMVN to one feature matrix ``x``.

        Mean/variance statistics are computed over the first ``input_length``
        frames only; frames beyond ``input_length`` are overwritten with the
        padding value.
        """
        if normalize_means:
            # Subtract the per-bin mean over the valid (unpadded) frames.
            SCREAMING_SNAKE_CASE:Optional[Any] = x[:input_length].mean(axis=0 )
            SCREAMING_SNAKE_CASE:Optional[Any] = np.subtract(UpperCamelCase_ ,UpperCamelCase_ )
        if normalize_vars:
            # Divide by the per-bin standard deviation over the valid frames.
            SCREAMING_SNAKE_CASE:Union[str, Any] = x[:input_length].std(axis=0 )
            SCREAMING_SNAKE_CASE:List[str] = np.divide(UpperCamelCase_ ,UpperCamelCase_ )
        if input_length < x.shape[0]:
            # Re-apply the padding value to frames past the utterance length.
            SCREAMING_SNAKE_CASE:int = padding_value
        # make sure array is in float32
        SCREAMING_SNAKE_CASE:List[str] = x.astype(np.floataa )
        return x

    def __UpperCamelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Tuple = None ):
        """Apply CMVN to every feature matrix in a batch.

        Valid lengths come from ``attention_mask`` row sums when a mask is
        given, otherwise each matrix's full frame count is used.
        """
        SCREAMING_SNAKE_CASE:Union[str, Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(UpperCamelCase_ ,UpperCamelCase_ ,self.normalize_means ,self.normalize_vars ,self.padding_value )
            for x, n in zip(UpperCamelCase_ ,UpperCamelCase_ )
        ]

    def __call__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Dict = False ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : Any = False ,SCREAMING_SNAKE_CASE__ : Optional[Any] = None ,SCREAMING_SNAKE_CASE__ : Any = None ,SCREAMING_SNAKE_CASE__ : Dict = None ,SCREAMING_SNAKE_CASE__ : Any = None ,**SCREAMING_SNAKE_CASE__ : Union[str, Any] ,):
        """Featurize one or more raw speech waveforms into a padded batch.

        Steps: validate sampling rate -> normalize input to a list of float32
        mono waveforms -> extract fbank features -> pad -> optional CMVN ->
        optional tensor conversion.  Only mono audio is accepted.
        """
        if sampling_rate is not None:
            # Refuse input recorded at a different rate than the extractor
            # (and hence the model) was configured for.
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    F''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        # A 2-D numpy array is treated as an already-batched input.
        SCREAMING_SNAKE_CASE:Dict = isinstance(UpperCamelCase_ ,np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        SCREAMING_SNAKE_CASE:Tuple = is_batched_numpy or (
            isinstance(UpperCamelCase_ ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
        )
        # Coerce every accepted input form to float32 numpy array(s).
        if is_batched:
            SCREAMING_SNAKE_CASE:str = [np.asarray(UpperCamelCase_ ,dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(UpperCamelCase_ ,np.ndarray ):
            SCREAMING_SNAKE_CASE:int = np.asarray(UpperCamelCase_ ,dtype=np.floataa )
        elif isinstance(UpperCamelCase_ ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            SCREAMING_SNAKE_CASE:Optional[Any] = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            SCREAMING_SNAKE_CASE:str = [raw_speech]
        # extract fbank features
        SCREAMING_SNAKE_CASE:Tuple = [self._extract_fbank_features(UpperCamelCase_ ) for waveform in raw_speech]
        # convert into correct format for padding
        SCREAMING_SNAKE_CASE:Optional[Any] = BatchFeature({"input_features": features} )
        SCREAMING_SNAKE_CASE:Union[str, Any] = self.pad(
            UpperCamelCase_ ,padding=UpperCamelCase_ ,max_length=UpperCamelCase_ ,truncation=UpperCamelCase_ ,pad_to_multiple_of=UpperCamelCase_ ,return_attention_mask=UpperCamelCase_ ,**UpperCamelCase_ ,)
        # make sure list is in array format
        SCREAMING_SNAKE_CASE:Dict = padded_inputs.get("input_features" )
        if isinstance(input_features[0] ,UpperCamelCase_ ):
            SCREAMING_SNAKE_CASE:str = [np.asarray(UpperCamelCase_ ,dtype=np.floataa ) for feature in input_features]
        SCREAMING_SNAKE_CASE:Tuple = padded_inputs.get("attention_mask" )
        if attention_mask is not None:
            SCREAMING_SNAKE_CASE:Optional[int] = [np.asarray(UpperCamelCase_ ,dtype=np.intaa ) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            # Only pass a mask to `normalize` when padding was actually
            # applied; otherwise every frame of every utterance is valid.
            SCREAMING_SNAKE_CASE:Any = (
                np.array(UpperCamelCase_ ,dtype=np.intaa )
                if self._get_padding_strategies(UpperCamelCase_ ,max_length=UpperCamelCase_ ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            SCREAMING_SNAKE_CASE:Any = self.normalize(
                padded_inputs["input_features"] ,attention_mask=UpperCamelCase_ )
        if return_tensors is not None:
            SCREAMING_SNAKE_CASE:Any = padded_inputs.convert_to_tensors(UpperCamelCase_ )
        return padded_inputs
139
"""Tests for `datasets` JSON I/O (``JsonDatasetReader`` / ``JsonDatasetWriter``).

NOTE(review): identifiers in this chunk appear machine-mangled — every
helper/test function is named ``a`` (each later definition shadows the
previous one), several signatures repeat the parameter name ``__a`` (duplicate
parameter names are not valid Python), and bodies reference pytest-fixture
style names (``tmp_path``, ``jsonl_path``, ``features``, ``dataset`` ...) that
the mangled signatures no longer bind.  Restore the original descriptive names
before running; the code is preserved byte-for-byte and only documented.
"""
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def a ( __a , __a ) -> Optional[int]:
    """Assert a Dataset read from JSON has 4 rows, 3 known columns, expected dtypes."""
    assert isinstance(__a , __a )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> Optional[Any]:
    """Reading with/without keep_in_memory yields a valid Dataset and the
    matching Arrow-memory behavior (grows only when kept in memory)."""
    UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
    UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        UpperCamelCase__ :Tuple = JsonDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
    _check_json_dataset(__a , __a )


@pytest.mark.parametrize(
    '''features''' ,
    [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ] ,
)
def a ( __a , __a , __a ) -> Any:
    """An explicit `features` schema (or None for the inferred default) is
    honored when reading a JSON dataset."""
    UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
    UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    UpperCamelCase__ :Optional[Any] = features.copy() if features else default_expected_features
    UpperCamelCase__ :Tuple = (
        Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
    )
    UpperCamelCase__ :int = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
    _check_json_dataset(__a , __a )


@pytest.mark.parametrize(
    '''features''' ,
    [
        None,
        {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
    ] ,
)
def a ( __a , __a , __a ) -> Tuple:
    """Column order from a reordered `features` schema is preserved on read."""
    UpperCamelCase__ :int = tmp_path / '''cache'''
    UpperCamelCase__ :str = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
    UpperCamelCase__ :Any = features.copy() if features else default_expected_features
    UpperCamelCase__ :Union[str, Any] = (
        Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
    )
    UpperCamelCase__ :Any = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
    assert isinstance(__a , __a )
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def a ( __a , __a ) -> List[Any]:
    """Sorted-field JSON input still maps onto an unsorted `features` schema,
    keeping the schema's column order."""
    UpperCamelCase__ :Any = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
    UpperCamelCase__ :int = features.copy()
    UpperCamelCase__ :List[Any] = (
        Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
    )
    UpperCamelCase__ :Optional[int] = tmp_path / '''cache'''
    UpperCamelCase__ :Dict = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
    assert isinstance(__a , __a )
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> List[Any]:
    """The `split` argument is recorded on the resulting Dataset
    (defaulting to "train" when omitted)."""
    UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
    UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    UpperCamelCase__ :List[Any] = JsonDatasetReader(__a , cache_dir=__a , split=__a ).read()
    _check_json_dataset(__a , __a )
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize('''path_type''' , [str, list] )
def a ( __a , __a , __a ) -> Any:
    """Both a single path string and a list of paths are accepted as input."""
    if issubclass(__a , __a ):
        UpperCamelCase__ :Union[str, Any] = jsonl_path
    elif issubclass(__a , __a ):
        UpperCamelCase__ :int = [jsonl_path]
    UpperCamelCase__ :Dict = tmp_path / '''cache'''
    UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    UpperCamelCase__ :List[str] = JsonDatasetReader(__a , cache_dir=__a ).read()
    _check_json_dataset(__a , __a )


def a ( __a , __a , __a=("train",) ) -> Optional[Any]:
    """Assert each requested split of a DatasetDict has the expected shape,
    columns, and dtypes."""
    assert isinstance(__a , __a )
    for split in splits:
        UpperCamelCase__ :Optional[int] = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> List[str]:
    """Reading a {split: path} mapping yields a valid DatasetDict with the
    matching Arrow-memory behavior."""
    UpperCamelCase__ :List[str] = tmp_path / '''cache'''
    UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        UpperCamelCase__ :str = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=__a , keep_in_memory=__a ).read()
    _check_json_datasetdict(__a , __a )


@pytest.mark.parametrize(
    '''features''' ,
    [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ] ,
)
def a ( __a , __a , __a ) -> int:
    """An explicit `features` schema is honored when reading a DatasetDict."""
    UpperCamelCase__ :Tuple = tmp_path / '''cache'''
    UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    UpperCamelCase__ :Optional[int] = features.copy() if features else default_expected_features
    UpperCamelCase__ :str = (
        Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
    )
    UpperCamelCase__ :Dict = JsonDatasetReader({'''train''': jsonl_path} , features=__a , cache_dir=__a ).read()
    _check_json_datasetdict(__a , __a )


@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> str:
    """Split names from the input mapping are carried through to the
    DatasetDict entries."""
    if split:
        UpperCamelCase__ :List[str] = {split: jsonl_path}
    else:
        UpperCamelCase__ :int = '''train'''
        UpperCamelCase__ :int = {'''train''': jsonl_path, '''test''': jsonl_path}
    UpperCamelCase__ :Any = tmp_path / '''cache'''
    UpperCamelCase__ :Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    UpperCamelCase__ :Any = JsonDatasetReader(__a , cache_dir=__a ).read()
    _check_json_datasetdict(__a , __a , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )


def a ( __a ) -> Union[str, Any]:
    """Parse a whole-file JSON buffer (used as a `load_json_function`)."""
    return json.load(__a )


def a ( __a ) -> int:
    """Parse a JSON-lines buffer into a list of objects
    (used as a `load_json_function`)."""
    return [json.loads(__a ) for line in buffer]


class lowercase :
    """Tests for JsonDatasetWriter: orient/lines modes, multiprocessing,
    and compression round-trips."""

    @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
        """Writing with lines=True/False produces content the matching loader
        reads back as 10 records."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ ).write()
            buffer.seek(0 )
            UpperCamelCase__ :List[Any] = load_json_function(UpperCamelCase_ )
        assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
        assert isinstance(exported_content[0] , UpperCamelCase_ )
        assert len(UpperCamelCase_ ) == 10

    @pytest.mark.parametrize(
        '''orient, container, keys, len_at''' ,
        [
            ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
            ('''split''', dict, {'''columns''', '''data'''}, '''data'''),
            ('''index''', dict, set('''0123456789''' ), None),
            ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
            ('''values''', list, None, None),
            ('''table''', dict, {'''schema''', '''data'''}, '''data'''),
        ] ,
    )
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
        """Each pandas-style `orient` mode produces the expected container
        type, top-level keys, and record count."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ ).write()
            buffer.seek(0 )
            UpperCamelCase__ :Optional[int] = load_json(UpperCamelCase_ )
        assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(UpperCamelCase_ ) == 10

    @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
        """Multiprocess writing (num_proc=2) yields the same 10 records."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , num_proc=2 ).write()
            buffer.seek(0 )
            UpperCamelCase__ :Union[str, Any] = load_json_function(UpperCamelCase_ )
        assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
        assert isinstance(exported_content[0] , UpperCamelCase_
        )
        assert len(UpperCamelCase_ ) == 10

    @pytest.mark.parametrize(
        '''orient, container, keys, len_at''' ,
        [
            ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
            ('''split''', dict, {'''columns''', '''data'''}, '''data'''),
            ('''index''', dict, set('''0123456789''' ), None),
            ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
            ('''values''', list, None, None),
            ('''table''', dict, {'''schema''', '''data'''}, '''data'''),
        ] ,
    )
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
        """Every `orient` mode also works with multiprocess writing."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ , num_proc=2 ).write()
            buffer.seek(0 )
            UpperCamelCase__ :int = load_json(UpperCamelCase_ )
        assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(UpperCamelCase_ ) == 10

    def lowerCAmelCase__ ( self , UpperCamelCase_ ):
        """num_proc=0 is invalid and must raise."""
        with pytest.raises(UpperCamelCase_ ):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , num_proc=0 )

    @pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
        """Compressed output round-trips: decompressed bytes of the written
        file match the reference fixture for each codec."""
        UpperCamelCase__ :Tuple = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
        UpperCamelCase__ :Union[str, Any] = str(shared_datadir / F'''test_file.json.{extension}''' )
        JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , compression=UpperCamelCase_ ).write()
        with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
            UpperCamelCase__ :Dict = f.read()
        with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
            UpperCamelCase__ :int = f.read()
        assert exported_content == original_content
97
0
import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __snake_case : def __init__( self : int , A_ : str , A_ : Any=1_3 , A_ : Dict=3_0 , A_ : Tuple=2 , A_ : Tuple=3 , A_ : str=True , A_ : int=True , A_ : str=3_2 , A_ : Tuple=5 , A_ : Dict=4 , A_ : Tuple=3_7 , A_ : Any="gelu" , A_ : str=0.1 , A_ : Tuple=0.1 , A_ : Optional[int]=1_0 , A_ : List[Any]=0.02 , A_ : Optional[int]=3 , A_ : Optional[int]=0.6 , A_ : int=None , ): lowerCAmelCase_ : int = parent lowerCAmelCase_ : List[str] = batch_size lowerCAmelCase_ : Optional[int] = image_size lowerCAmelCase_ : List[str] = patch_size lowerCAmelCase_ : int = num_channels lowerCAmelCase_ : Tuple = is_training lowerCAmelCase_ : Optional[int] = use_labels lowerCAmelCase_ : Optional[Any] = hidden_size lowerCAmelCase_ : List[str] = num_hidden_layers lowerCAmelCase_ : Optional[int] = num_attention_heads lowerCAmelCase_ : Optional[Any] = intermediate_size lowerCAmelCase_ : Tuple = hidden_act lowerCAmelCase_ : str = hidden_dropout_prob lowerCAmelCase_ : List[str] = attention_probs_dropout_prob lowerCAmelCase_ : Optional[Any] = type_sequence_label_size lowerCAmelCase_ : Tuple = initializer_range lowerCAmelCase_ : int = mask_ratio lowerCAmelCase_ : Optional[int] = scope # in ViTMAE, the expected sequence length = 
(num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowerCAmelCase_ : Union[str, Any] = (image_size // patch_size) ** 2 lowerCAmelCase_ : List[str] = int(math.ceil((1 - mask_ratio) * (num_patches + 1))) def UpperCAmelCase__ ( self : List[Any]): lowerCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) lowerCAmelCase_ : Optional[Any] = None if self.use_labels: lowerCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size) lowerCAmelCase_ : Optional[int] = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self : List[str]): return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def UpperCAmelCase__ ( self : int , A_ : Any , A_ : List[Any] , A_ : List[Any]): lowerCAmelCase_ : List[Any] = ViTMAEModel(config=UpperCamelCase_) model.to(UpperCamelCase_) model.eval() lowerCAmelCase_ : Tuple = model(UpperCamelCase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase__ ( self : Union[str, Any] , A_ : Tuple , A_ : Tuple , A_ : int): lowerCAmelCase_ : Optional[Any] = ViTMAEForPreTraining(UpperCamelCase_) model.to(UpperCamelCase_) model.eval() lowerCAmelCase_ : Optional[Any] = model(UpperCamelCase_) lowerCAmelCase_ : str = (self.image_size // self.patch_size) ** 2 lowerCAmelCase_ : List[Any] = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, 
num_patches, expected_num_channels)) # test greyscale images lowerCAmelCase_ : str = 1 lowerCAmelCase_ : Dict = ViTMAEForPreTraining(UpperCamelCase_) model.to(UpperCamelCase_) model.eval() lowerCAmelCase_ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) lowerCAmelCase_ : str = model(UpperCamelCase_) lowerCAmelCase_ : List[str] = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels)) def UpperCAmelCase__ ( self : Optional[Any]): lowerCAmelCase_ : Tuple = self.prepare_config_and_inputs() lowerCAmelCase_ : int = config_and_inputs lowerCAmelCase_ : str = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __snake_case ( A__ ,A__ ,unittest.TestCase ): _a = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () _a = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {} _a = False _a = False _a = False _a = False def UpperCAmelCase__ ( self : List[Any]): lowerCAmelCase_ : Dict = ViTMAEModelTester(self) lowerCAmelCase_ : Optional[int] = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=3_7) def UpperCAmelCase__ ( self : Optional[Any]): self.config_tester.run_common_tests() @unittest.skip(reason='''ViTMAE does not use inputs_embeds''') def UpperCAmelCase__ ( self : Optional[Any]): pass def UpperCAmelCase__ ( self : List[str]): lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ : Tuple = model_class(UpperCamelCase_) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) lowerCAmelCase_ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear)) def UpperCAmelCase__ ( self : str): lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: lowerCAmelCase_ : Optional[int] = model_class(UpperCamelCase_) lowerCAmelCase_ : List[str] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase_ : Tuple = [*signature.parameters.keys()] lowerCAmelCase_ : Dict = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCamelCase_) def UpperCAmelCase__ ( self : Dict): lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_) def UpperCAmelCase__ ( self : Union[str, Any]): lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase_) def UpperCAmelCase__ ( self : Optional[Any] , A_ : Dict , A_ : Optional[int] , A_ : List[str]): np.random.seed(2) lowerCAmelCase_ : Optional[int] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2) lowerCAmelCase_ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) lowerCAmelCase_ : int = torch.from_numpy(UpperCamelCase_) # Add `noise` argument. 
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowerCAmelCase_ : List[Any] = pt_noise super().check_pt_tf_models(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) def UpperCAmelCase__ ( self : List[str]): lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ : List[str] = model_class(UpperCamelCase_) model.to(UpperCamelCase_) model.eval() # make random mask reproducible torch.manual_seed(2) with torch.no_grad(): lowerCAmelCase_ : List[str] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_)) lowerCAmelCase_ : Optional[Any] = outputs[0].cpu().numpy() lowerCAmelCase_ : Union[str, Any] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase_) lowerCAmelCase_ : List[str] = model_class.from_pretrained(UpperCamelCase_) model.to(UpperCamelCase_) # make random mask reproducible torch.manual_seed(2) with torch.no_grad(): lowerCAmelCase_ : Any = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_)) # Make sure we don't have nans lowerCAmelCase_ : int = after_outputs[0].cpu().numpy() lowerCAmelCase_ : Optional[int] = 0 lowerCAmelCase_ : List[Any] = np.amax(np.abs(out_a - out_a)) self.assertLessEqual(UpperCamelCase_ , 1e-5) @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''') def UpperCAmelCase__ ( self : Any): pass @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''') def UpperCAmelCase__ ( self : Optional[Any]): pass @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''') def UpperCAmelCase__ ( self : Dict): pass @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. 
See test_save_load''') def UpperCAmelCase__ ( self : Union[str, Any]): pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''') def UpperCAmelCase__ ( self : int): pass @slow def UpperCAmelCase__ ( self : str): for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ : Any = ViTMAEModel.from_pretrained(UpperCamelCase_) self.assertIsNotNone(UpperCamelCase_) def UpperCamelCase( ): lowerCAmelCase_ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __snake_case ( unittest.TestCase ): @cached_property def UpperCAmelCase__ ( self : Dict): return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''') if is_vision_available() else None @slow def UpperCAmelCase__ ( self : Dict): np.random.seed(2) lowerCAmelCase_ : Tuple = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''').to(UpperCamelCase_) lowerCAmelCase_ : Tuple = self.default_image_processor lowerCAmelCase_ : Dict = prepare_img() lowerCAmelCase_ : Tuple = image_processor(images=UpperCamelCase_ , return_tensors='''pt''').to(UpperCamelCase_) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowerCAmelCase_ : Optional[int] = ViTMAEConfig() lowerCAmelCase_ : List[str] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2) lowerCAmelCase_ : Optional[Any] = np.random.uniform(size=(1, num_patches)) # forward pass with torch.no_grad(): lowerCAmelCase_ : Dict = model(**UpperCamelCase_ , noise=torch.from_numpy(UpperCamelCase_).to(device=UpperCamelCase_)) # verify the logits lowerCAmelCase_ : str = torch.Size((1, 1_9_6, 7_6_8)) self.assertEqual(outputs.logits.shape , UpperCamelCase_) lowerCAmelCase_ : Dict = torch.tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]) self.assertTrue(torch.allclose(outputs.logits[0, 
:3, :3] , expected_slice.to(UpperCamelCase_) , atol=1e-4))
103
"""Tests for converting SageMaker `nargs`-style training-script arguments into a dict."""
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig):
    # A minimal, fully-populated SageMaker launch configuration used as a fixture.
    # NOTE: the original corpus copy named every field `_a`, so each assignment
    # shadowed the previous one and the config carried only the last value; the
    # field names below restore one distinct attribute per value, in the original
    # order, and the base class is the `SageMakerConfig` this file imports.
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    # Well-formed flag list: every boolean flag is followed by an explicit value.
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    # Malformed flag list: `--do_train` / `--do_predict` have no values, which
    # `_convert_nargs_to_dict` must reject.
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        """`_convert_nargs_to_dict` parses values into typed Python objects and
        raises on flags that are missing their values."""
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        # "bert" stays a string, "False" becomes a bool, "3" an int, and the
        # scientific / decimal literals become floats.
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        # NOTE(review): upstream accelerate raises ValueError here — confirm the
        # exception type against the installed `_convert_nargs_to_dict`.
        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
97
0
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = '▁' _snake_case = {'vocab_file': 'sentencepiece.bpe.model'} _snake_case = { 'vocab_file': { 'facebook/mbart-large-en-ro': ( 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model' ), 'facebook/mbart-large-cc25': ( 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model' ), } } _snake_case = { 'facebook/mbart-large-en-ro': 1_024, 'facebook/mbart-large-cc25': 1_024, } # fmt: off _snake_case = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN'] class a__ ( A__ ): _SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE : str = ['input_ids', 'attention_mask'] _SCREAMING_SNAKE_CASE : Any = [] _SCREAMING_SNAKE_CASE : Optional[Any] = [] def __init__( self , _UpperCamelCase , _UpperCamelCase="<s>" , _UpperCamelCase="</s>" , _UpperCamelCase="</s>" , _UpperCamelCase="<s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase="<mask>" , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase = None , _UpperCamelCase=None , **_UpperCamelCase , ): """simple docstring""" _lowercase : Dict = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token _lowercase : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( 
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , src_lang=UpperCamelCase_ , tgt_lang=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) _lowercase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCamelCase_ ) ) _lowercase : Optional[int] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token _lowercase : Dict = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _lowercase : Tuple = 1 _lowercase : int = len(self.sp_model ) _lowercase : Dict = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCamelCase_ ) } _lowercase : List[Any] = {v: k for k, v in self.lang_code_to_id.items()} _lowercase : Any = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) _lowercase : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} _lowercase : Union[str, Any] = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) _lowercase : Any = src_lang if src_lang is not None else '''en_XX''' _lowercase : Optional[Any] = self.lang_code_to_id[self._src_lang] _lowercase : Union[str, Any] = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self ): """simple docstring""" _lowercase : Dict = self.__dict__.copy() _lowercase : int = None _lowercase : Dict = self.sp_model.serialized_model_proto() return state def __setstate__( self , _UpperCamelCase ): """simple docstring""" _lowercase : Tuple = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): _lowercase : Optional[int] = {} _lowercase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def _lowerCamelCase ( self ): """simple docstring""" return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def _lowerCamelCase ( self ): """simple docstring""" return self._src_lang @src_lang.setter def _lowerCamelCase ( self , _UpperCamelCase ): """simple docstring""" _lowercase : Optional[Any] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) _lowercase : List[str] = [1] * len(self.prefix_tokens ) _lowercase : int = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(UpperCamelCase_ )) + suffix_ones return prefix_ones + ([0] * len(UpperCamelCase_ )) + ([0] * len(UpperCamelCase_ )) + suffix_ones def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase = None ): """simple docstring""" if 
token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase = None ): """simple docstring""" _lowercase : Optional[int] = [self.sep_token_id] _lowercase : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ): """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) _lowercase : Tuple = src_lang _lowercase : Optional[Any] = self(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ ) _lowercase : List[str] = self.convert_tokens_to_ids(UpperCamelCase_ ) _lowercase : Dict = tgt_lang_id return inputs def _lowerCamelCase ( self ): """simple docstring""" _lowercase : Tuple = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCamelCase ( self , _UpperCamelCase ): """simple docstring""" return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ ) def _lowerCamelCase ( self , _UpperCamelCase ): """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _lowercase : Any = self.sp_model.PieceToId(UpperCamelCase_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _lowerCamelCase ( self , _UpperCamelCase ): """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return 
self.sp_model.IdToPiece(index - self.fairseq_offset ) def _lowerCamelCase ( self , _UpperCamelCase ): """simple docstring""" _lowercase : List[str] = ''''''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , " " ).strip() return out_string def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase = None ): """simple docstring""" if not os.path.isdir(UpperCamelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowercase : int = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase_ , "wb" ) as fi: _lowercase : Any = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_ ) return (out_vocab_file,) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase = "en_XX" , _UpperCamelCase = None , _UpperCamelCase = "ro_RO" , **_UpperCamelCase , ): """simple docstring""" _lowercase : Optional[Any] = src_lang _lowercase : Optional[Any] = tgt_lang return super().prepare_seqaseq_batch(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( self ): """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def _lowerCamelCase ( self ): """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _lowerCamelCase ( self , _UpperCamelCase ): """simple docstring""" _lowercase : Any = self.lang_code_to_id[src_lang] _lowercase : int = [] _lowercase : Union[str, Any] = [self.eos_token_id, self.cur_lang_code] def _lowerCamelCase ( self , _UpperCamelCase ): """simple docstring""" _lowercase : Dict = self.lang_code_to_id[lang] _lowercase : Optional[Any] = [] _lowercase : Tuple = [self.eos_token_id, self.cur_lang_code]
250
"""Small training/visualization helpers: parameter freezing, device selection,
image display, and a wall-clock timestamp.

NOTE(review): the corpus copy named all four helpers `a`, so each definition
shadowed the previous one and only the last was callable; distinct names are
restored here, with `a` kept as an alias for the final (timestamp) helper so
any caller of the surviving `a()` still works.
"""
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_params(module) -> None:
    """Disable gradient tracking on every parameter of *module*."""
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    """Pick the best available torch device string: 'mps', 'cuda', or 'cpu'."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image) -> None:
    """Display *image* with matplotlib, hiding both axes."""
    fig = plt.imshow(image)
    # Hide tick marks/labels; `False`, not the image itself, is the visibility flag.
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    """Return the current local time formatted as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp


# Backward-compatible alias: in the original file only the last `a` survived,
# which was the timestamp helper.
a = get_timestamp
97
0
"""simple docstring""" from typing import List, Optional, Union import torch from transformers import ( XLMRobertaTokenizer, ) from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) from .text_encoder import MultilingualCLIP _UpperCamelCase: List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name _UpperCamelCase: Optional[int] = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... 
).images\n\n >>> image[0].save("cat.png")\n ```\n' def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=8 ) -> Union[str, Any]: '''simple docstring''' lowercase : Any = h // scale_factor**2 if h % scale_factor**2 != 0: new_h += 1 lowercase : Tuple = w // scale_factor**2 if w % scale_factor**2 != 0: new_w += 1 return new_h * scale_factor, new_w * scale_factor class a__ ( A__ ): def __init__( self : Any, lowerCAmelCase : Optional[Any], lowerCAmelCase : List[Any], lowerCAmelCase : int, lowerCAmelCase : Optional[Any], lowerCAmelCase : str, ) -> str: super().__init__() self.register_modules( text_encoder=UpperCamelCase_, tokenizer=UpperCamelCase_, unet=UpperCamelCase_, scheduler=UpperCamelCase_, movq=UpperCamelCase_, ) lowercase : Union[str, Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1) def lowercase ( self : List[Any], lowerCAmelCase : Optional[Any], lowerCAmelCase : Optional[int], lowerCAmelCase : str, lowerCAmelCase : List[str], lowerCAmelCase : Dict, lowerCAmelCase : str ) -> str: if latents is None: lowercase : str = randn_tensor(UpperCamelCase_, generator=UpperCamelCase_, device=UpperCamelCase_, dtype=UpperCamelCase_ ) else: if latents.shape != shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) lowercase : Dict = latents.to(UpperCamelCase_ ) lowercase : Any = latents * scheduler.init_noise_sigma return latents def lowercase ( self : str, lowerCAmelCase : str, lowerCAmelCase : List[str], lowerCAmelCase : int, lowerCAmelCase : List[str], lowerCAmelCase : str=None, ) -> List[Any]: lowercase : List[Any] = len(UpperCamelCase_ ) if isinstance(UpperCamelCase_, UpperCamelCase_ ) else 1 # get prompt text embeddings lowercase : List[str] = self.tokenizer( UpperCamelCase_, padding='max_length', truncation=UpperCamelCase_, max_length=77, return_attention_mask=UpperCamelCase_, add_special_tokens=UpperCamelCase_, return_tensors='pt', ) lowercase : str = text_inputs.input_ids lowercase : List[str] = 
self.tokenizer(UpperCamelCase_, padding='longest', return_tensors='pt' ).input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(UpperCamelCase_, UpperCamelCase_ ): lowercase : Tuple = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) lowercase : Union[str, Any] = text_input_ids.to(UpperCamelCase_ ) lowercase : Optional[int] = text_inputs.attention_mask.to(UpperCamelCase_ ) lowercase : Optional[int] = self.text_encoder( input_ids=UpperCamelCase_, attention_mask=UpperCamelCase_ ) lowercase : Tuple = prompt_embeds.repeat_interleave(UpperCamelCase_, dim=0 ) lowercase : List[Any] = text_encoder_hidden_states.repeat_interleave(UpperCamelCase_, dim=0 ) lowercase : str = text_mask.repeat_interleave(UpperCamelCase_, dim=0 ) if do_classifier_free_guidance: lowercase : List[str] if negative_prompt is None: lowercase : Dict = [''''''] * batch_size elif type(UpperCamelCase_ ) is not type(UpperCamelCase_ ): raise TypeError( f'''`negative_prompt` should be the same type to `prompt`, but got {type(UpperCamelCase_ )} !=''' f''' {type(UpperCamelCase_ )}.''' ) elif isinstance(UpperCamelCase_, UpperCamelCase_ ): lowercase : List[str] = [negative_prompt] elif batch_size != len(UpperCamelCase_ ): raise ValueError( f'''`negative_prompt`: {negative_prompt} has batch size {len(UpperCamelCase_ )}, but `prompt`:''' f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' ' the batch size of `prompt`.' 
) else: lowercase : Tuple = negative_prompt lowercase : int = self.tokenizer( UpperCamelCase_, padding='max_length', max_length=77, truncation=UpperCamelCase_, return_attention_mask=UpperCamelCase_, add_special_tokens=UpperCamelCase_, return_tensors='pt', ) lowercase : List[str] = uncond_input.input_ids.to(UpperCamelCase_ ) lowercase : Union[str, Any] = uncond_input.attention_mask.to(UpperCamelCase_ ) lowercase : Optional[Any] = self.text_encoder( input_ids=UpperCamelCase_, attention_mask=UpperCamelCase_ ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method lowercase : Optional[int] = negative_prompt_embeds.shape[1] lowercase : List[Any] = negative_prompt_embeds.repeat(1, UpperCamelCase_ ) lowercase : Any = negative_prompt_embeds.view(batch_size * num_images_per_prompt, UpperCamelCase_ ) lowercase : List[str] = uncond_text_encoder_hidden_states.shape[1] lowercase : Tuple = uncond_text_encoder_hidden_states.repeat(1, UpperCamelCase_, 1 ) lowercase : List[str] = uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt, UpperCamelCase_, -1 ) lowercase : List[Any] = uncond_text_mask.repeat_interleave(UpperCamelCase_, dim=0 ) # done duplicates # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes lowercase : Tuple = torch.cat([negative_prompt_embeds, prompt_embeds] ) lowercase : Optional[int] = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] ) lowercase : int = torch.cat([uncond_text_mask, text_mask] ) return prompt_embeds, text_encoder_hidden_states, text_mask def lowercase ( self : Any, lowerCAmelCase : Optional[Any]=0 ) -> Any: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) lowercase : int = torch.device(f'''cuda:{gpu_id}''' ) lowercase : Tuple = [ self.unet, self.text_encoder, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(UpperCamelCase_, UpperCamelCase_ ) def lowercase ( self : Optional[int], lowerCAmelCase : Tuple=0 ) -> Dict: if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0' ): from accelerate import cpu_offload_with_hook else: raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' ) lowercase : Tuple = torch.device(f'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to('cpu', silence_dtype_warnings=UpperCamelCase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) lowercase : str = None for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]: lowercase : Union[str, Any] = cpu_offload_with_hook(UpperCamelCase_, UpperCamelCase_, prev_module_hook=UpperCamelCase_ ) if self.safety_checker is not None: lowercase : Optional[Any] = cpu_offload_with_hook(self.safety_checker, UpperCamelCase_, prev_module_hook=UpperCamelCase_ ) # We'll offload the last model manually. 
lowercase : Optional[Any] = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def lowercase ( self : List[Any] ) -> Tuple: if not hasattr(self.unet, '_hf_hook' ): return self.device for module in self.unet.modules(): if ( hasattr(UpperCamelCase_, '_hf_hook' ) and hasattr(module._hf_hook, 'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(UpperCamelCase_ ) def __call__( self : Any, lowerCAmelCase : str, lowerCAmelCase : Any, lowerCAmelCase : str, lowerCAmelCase : Optional[Any] = None, lowerCAmelCase : Tuple = 512, lowerCAmelCase : Tuple = 512, lowerCAmelCase : Union[str, Any] = 100, lowerCAmelCase : Optional[Any] = 4.0, lowerCAmelCase : int = 1, lowerCAmelCase : Any = None, lowerCAmelCase : List[str] = None, lowerCAmelCase : int = "pil", lowerCAmelCase : Dict = True, ) -> str: if isinstance(UpperCamelCase_, UpperCamelCase_ ): lowercase : int = 1 elif isinstance(UpperCamelCase_, UpperCamelCase_ ): lowercase : Optional[int] = len(UpperCamelCase_ ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase_ )}''' ) lowercase : Optional[int] = self._execution_device lowercase : Any = batch_size * num_images_per_prompt lowercase : List[Any] = guidance_scale > 1.0 lowercase : Optional[Any] = self._encode_prompt( UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_ ) if isinstance(UpperCamelCase_, UpperCamelCase_ ): lowercase : Optional[Any] = torch.cat(UpperCamelCase_, dim=0 ) if isinstance(UpperCamelCase_, UpperCamelCase_ ): lowercase : Optional[int] = torch.cat(UpperCamelCase_, dim=0 ) if do_classifier_free_guidance: lowercase : int = image_embeds.repeat_interleave(UpperCamelCase_, dim=0 ) lowercase : List[Any] = negative_image_embeds.repeat_interleave(UpperCamelCase_, dim=0 ) 
lowercase : List[str] = torch.cat([negative_image_embeds, image_embeds], dim=0 ).to( dtype=prompt_embeds.dtype, device=UpperCamelCase_ ) self.scheduler.set_timesteps(UpperCamelCase_, device=UpperCamelCase_ ) lowercase : int = self.scheduler.timesteps lowercase : List[Any] = self.unet.config.in_channels lowercase : Dict = get_new_h_w(UpperCamelCase_, UpperCamelCase_, self.movq_scale_factor ) # create initial latent lowercase : List[Any] = self.prepare_latents( (batch_size, num_channels_latents, height, width), text_encoder_hidden_states.dtype, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, self.scheduler, ) for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ): # expand the latents if we are doing classifier free guidance lowercase : List[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowercase : Any = {'''text_embeds''': prompt_embeds, '''image_embeds''': image_embeds} lowercase : Optional[int] = self.unet( sample=UpperCamelCase_, timestep=UpperCamelCase_, encoder_hidden_states=UpperCamelCase_, added_cond_kwargs=UpperCamelCase_, return_dict=UpperCamelCase_, )[0] if do_classifier_free_guidance: lowercase : Optional[Any] = noise_pred.split(latents.shape[1], dim=1 ) lowercase : List[Any] = noise_pred.chunk(2 ) lowercase : Optional[int] = variance_pred.chunk(2 ) lowercase : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) lowercase : Union[str, Any] = torch.cat([noise_pred, variance_pred_text], dim=1 ) if not ( hasattr(self.scheduler.config, 'variance_type' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): lowercase : int = noise_pred.split(latents.shape[1], dim=1 ) # compute the previous noisy sample x_t -> x_t-1 lowercase : Optional[int] = self.scheduler.step( UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, generator=UpperCamelCase_, ).prev_sample # post-processing lowercase : List[Any] = self.movq.decode(UpperCamelCase_, force_not_quantize=UpperCamelCase_ 
)['''sample'''] if output_type not in ["pt", "np", "pil"]: raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: lowercase : List[str] = image * 0.5 + 0.5 lowercase : Optional[Any] = image.clamp(0, 1 ) lowercase : List[str] = image.cpu().permute(0, 2, 3, 1 ).float().numpy() if output_type == "pil": lowercase : str = self.numpy_to_pil(UpperCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase_ )
255
'''simple docstring''' from scipy.stats import pearsonr import datasets __snake_case = ''' Pearson correlation coefficient and p-value for testing non-correlation. The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. ''' __snake_case = ''' Args: predictions (`list` of `int`): Predicted class labels, as returned by a model. references (`list` of `int`): Ground truth labels. return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`. Returns: pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation. p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities. Examples: Example 1-A simple example using only predictions and references. 
>>> pearsonr_metric = datasets.load_metric("pearsonr") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5]) >>> print(round(results[\'pearsonr\'], 2)) -0.74 Example 2-The same as Example 1, but that also returns the `p-value`. >>> pearsonr_metric = datasets.load_metric("pearsonr") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True) >>> print(sorted(list(results.keys()))) [\'p-value\', \'pearsonr\'] >>> print(round(results[\'pearsonr\'], 2)) -0.74 >>> print(round(results[\'p-value\'], 2)) 0.15 ''' __snake_case = ''' @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, Ilhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Antonio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase ( datasets.Metric ): """simple docstring""" def lowerCAmelCase__ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ): '''simple docstring''' if return_pvalue: UpperCamelCase__ :Any = pearsonr(UpperCamelCase_ , UpperCamelCase_ ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(UpperCamelCase_ , UpperCamelCase_ )[0] )}
97
0
"""simple docstring""" def lowercase ( _snake_case : List[str] , _snake_case : Tuple ) ->int: """simple docstring""" if len(__a ) != len(__a ): raise ValueError('''String lengths must match!''' ) __snake_case : Union[str, Any] = 0 for chara, chara in zip(__a , __a ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
102
'''simple docstring''' from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer __snake_case = logging.get_logger(__name__) __snake_case = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } __snake_case = { '''vocab_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json''' }, '''merges_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt''' }, '''tokenizer_config_file''': { '''facebook/blenderbot_small-90M''': ( '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json''' ) }, } __snake_case = { '''facebook/blenderbot_small-90M''': 512, } class lowercase ( A__ ): """simple docstring""" _a = VOCAB_FILES_NAMES _a = PRETRAINED_VOCAB_FILES_MAP _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a = BlenderbotSmallTokenizer def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , UpperCamelCase_=True , **UpperCamelCase_ , ): '''simple docstring''' super().__init__( ByteLevelBPETokenizer( vocab=UpperCamelCase_ , merges=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ , ) , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , **UpperCamelCase_ , ) UpperCamelCase__ :Union[str, Any] = add_prefix_space def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None ): '''simple docstring''' UpperCamelCase__ :List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + 
[self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ): '''simple docstring''' UpperCamelCase__ :Optional[int] = [self.sep_token_id] UpperCamelCase__ :Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
97
0
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : int = logging.get_logger(__name__) A__ : Optional[int] = { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/config.json', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/config.json', } class lowercase__ ( A__ ): _UpperCAmelCase :Optional[Any] = "xlnet" _UpperCAmelCase :Any = ["mems"] _UpperCAmelCase :Dict = { "n_token": "vocab_size", # Backward compatibility "hidden_size": "d_model", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : Optional[Any] , snake_case__ : List[str]=3_2000 , snake_case__ : str=1024 , snake_case__ : Dict=24 , snake_case__ : int=16 , snake_case__ : Optional[int]=4096 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : Any=True , snake_case__ : List[str]="bi" , snake_case__ : List[str]=0.02 , snake_case__ : Optional[int]=1E-12 , snake_case__ : str=0.1 , snake_case__ : List[str]=512 , snake_case__ : Dict=None , snake_case__ : str=True , snake_case__ : Any=False , snake_case__ : Optional[int]=False , snake_case__ : List[Any]=-1 , snake_case__ : List[str]=False , snake_case__ : List[Any]="last" , snake_case__ : Union[str, Any]=True , snake_case__ : Optional[int]="tanh" , snake_case__ : Tuple=0.1 , snake_case__ : Dict=5 , snake_case__ : int=5 , snake_case__ : Any=5 , snake_case__ : Dict=1 , snake_case__ : List[str]=2 , **snake_case__ : List[Any] , ): lowerCamelCase_ : Union[str, Any] =vocab_size lowerCamelCase_ : List[str] =d_model lowerCamelCase_ : Dict =n_layer lowerCamelCase_ : Any =n_head if d_model % n_head != 0: raise ValueError(F"""\'d_model % n_head\' ({d_model % n_head}) should be equal to 0""" ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( F"""`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})""" ) lowerCamelCase_ : List[Any] =d_model // 
n_head lowerCamelCase_ : str =ff_activation lowerCamelCase_ : Optional[int] =d_inner lowerCamelCase_ : int =untie_r lowerCamelCase_ : Optional[int] =attn_type lowerCamelCase_ : Optional[int] =initializer_range lowerCamelCase_ : Optional[int] =layer_norm_eps lowerCamelCase_ : Tuple =dropout lowerCamelCase_ : Union[str, Any] =mem_len lowerCamelCase_ : Optional[int] =reuse_len lowerCamelCase_ : Optional[Any] =bi_data lowerCamelCase_ : Optional[Any] =clamp_len lowerCamelCase_ : Optional[Any] =same_length lowerCamelCase_ : List[str] =summary_type lowerCamelCase_ : Optional[Any] =summary_use_proj lowerCamelCase_ : Optional[Any] =summary_activation lowerCamelCase_ : int =summary_last_dropout lowerCamelCase_ : Dict =start_n_top lowerCamelCase_ : Union[str, Any] =end_n_top lowerCamelCase_ : Any =bos_token_id lowerCamelCase_ : int =pad_token_id lowerCamelCase_ : Optional[int] =eos_token_id if "use_cache" in kwargs: warnings.warn( "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`" " instead." , UpperCamelCase_ , ) lowerCamelCase_ : Optional[Any] =kwargs['''use_cache'''] lowerCamelCase_ : List[Any] =use_mems_eval lowerCamelCase_ : Any =use_mems_train super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) @property def UpperCAmelCase__ ( self : List[str] ): logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) return -1 @max_position_embeddings.setter def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : Any ): raise NotImplementedError( F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
144
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
97
0
'''simple docstring''' from __future__ import annotations from decimal import Decimal from numpy import array def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> list[list[float]]: _a : List[str] =Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(__a ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2: # Calculate the determinant of the matrix _a : Optional[int] =float( d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) ) if determinant == 0: raise ValueError("""This matrix has no inverse.""" ) # Creates a copy of the matrix with swapped positions of the elements _a : List[Any] =[[0.0, 0.0], [0.0, 0.0]] _a : int =matrix[1][1], matrix[0][0] _a : Union[str, Any] =-matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(__a ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(__a ) == 3 and len(matrix[0] ) == 3 and len(matrix[1] ) == 3 and len(matrix[2] ) == 3 ): # Calculate the determinant of the matrix using Sarrus rule _a : Tuple =float( ( (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] )) + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] )) + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] )) ) - ( (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] )) + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] )) + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] )) ) ) if determinant == 0: raise ValueError("""This matrix has no inverse.""" ) # Creating cofactor matrix _a : Any =[ [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], ] _a : int =(d(matrix[1][1] ) * d(matrix[2][2] )) - ( d(matrix[1][2] ) * d(matrix[2][1] ) ) _a : Union[str, Any] =-( (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] )) ) _a : Tuple =(d(matrix[1][0] ) * d(matrix[2][1] )) - ( d(matrix[1][1] ) * d(matrix[2][0] ) ) _a : Any =-( (d(matrix[0][1] ) * 
d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )) ) _a : Dict =(d(matrix[0][0] ) * d(matrix[2][2] )) - ( d(matrix[0][2] ) * d(matrix[2][0] ) ) _a : Tuple =-( (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )) ) _a : List[Any] =(d(matrix[0][1] ) * d(matrix[1][2] )) - ( d(matrix[0][2] ) * d(matrix[1][1] ) ) _a : str =-( (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )) ) _a : Tuple =(d(matrix[0][0] ) * d(matrix[1][1] )) - ( d(matrix[0][1] ) * d(matrix[1][0] ) ) # Transpose the cofactor matrix (Adjoint matrix) _a : Optional[int] =array(__a ) for i in range(3 ): for j in range(3 ): _a : Optional[int] =cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix _a : str =array(__a ) for i in range(3 ): for j in range(3 ): inverse_matrix[i][j] /= d(__a ) # Calculate the inverse of the matrix return [[float(d(__a ) ) or 0.0 for n in row] for row in inverse_matrix] raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
276
'''simple docstring''' from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class lowercase ( A__ ): """simple docstring""" def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ): '''simple docstring''' super().__init__( features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , ) UpperCamelCase__ :Any = Generator( cache_dir=UpperCamelCase_ , features=UpperCamelCase_ , generator=UpperCamelCase_ , gen_kwargs=UpperCamelCase_ , **UpperCamelCase_ , ) def lowerCAmelCase__ ( self ): '''simple docstring''' if self.streaming: UpperCamelCase__ :Optional[Any] = self.builder.as_streaming_dataset(split='''train''' ) # Build regular (map-style) dataset else: UpperCamelCase__ :Optional[int] = None UpperCamelCase__ :int = None UpperCamelCase__ :Any = None UpperCamelCase__ :Any = None self.builder.download_and_prepare( download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , ) UpperCamelCase__ :List[Any] = self.builder.as_dataset( split='''train''' , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory ) return dataset
97
0
import numpy as np from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey def lowerCamelCase__ ( A__ : Optional[Any] , A__ : str , A__ : str , A__ : Any , A__ : Dict , A__ : Optional[Any] ): '''simple docstring''' if (ksize % 2) == 0: __lowerCamelCase = ksize + 1 __lowerCamelCase = np.zeros((ksize, ksize) , dtype=np.floataa ) # each value for y in range(__a ): for x in range(__a ): # distance from center __lowerCamelCase = x - ksize // 2 __lowerCamelCase = y - ksize // 2 # degree to radiant __lowerCamelCase = theta / 180 * np.pi __lowerCamelCase = np.cos(_theta ) __lowerCamelCase = np.sin(_theta ) # get kernel x __lowerCamelCase = cos_theta * px + sin_theta * py # get kernel y __lowerCamelCase = -sin_theta * px + cos_theta * py # fill kernel __lowerCamelCase = np.exp( -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi ) return gabor if __name__ == "__main__": import doctest doctest.testmod() # read original image UpperCAmelCase_ = imread('../image_data/lena.jpg') # turn image in gray scale value UpperCAmelCase_ = cvtColor(img, COLOR_BGR2GRAY) # Apply multiple Kernel to detect edges UpperCAmelCase_ = np.zeros(gray.shape[:2]) for theta in [0, 30, 60, 90, 120, 150]: UpperCAmelCase_ = gabor_filter_kernel(10, 8, theta, 10, 0, 0) out += filteraD(gray, CV_8UC3, kernel_aa) UpperCAmelCase_ = out / out.max() * 255 UpperCAmelCase_ = out.astype(np.uinta) imshow('Original', gray) imshow('Gabor filter with 20x20 mask and 6 directions', out) waitKey(0)
12
'''simple docstring''' __snake_case = 65521 def a ( __a ) -> int: '''simple docstring''' UpperCamelCase__ :Tuple = 1 UpperCamelCase__ :Any = 0 for plain_chr in plain_text: UpperCamelCase__ :List[str] = (a + ord(__a )) % MOD_ADLER UpperCamelCase__ :Tuple = (b + a) % MOD_ADLER return (b << 16) | a
97
0
from statistics import mean import numpy as np def UpperCAmelCase__ ( _A : Union[str, Any] , _A : Union[str, Any] , _A : Optional[Any] , _A : Dict ): '''simple docstring''' a__ =0 # Number of processes finished a__ =0 # Displays the finished process. # If it is 0, the performance is completed if it is 1, before the performance. a__ =[0] * no_of_process # List to include calculation results a__ =[0] * no_of_process # Sort by arrival time. a__ =[burst_time[i] for i in np.argsort(__a )] a__ =[process_name[i] for i in np.argsort(__a )] arrival_time.sort() while no_of_process > finished_process_count: a__ =0 while finished_process[i] == 1: i += 1 if current_time < arrival_time[i]: a__ =arrival_time[i] a__ =0 # Index showing the location of the process being performed a__ =0 # Saves the current response ratio. a__ =0 for i in range(0 , __a ): if finished_process[i] == 0 and arrival_time[i] <= current_time: a__ =(burst_time[i] + (current_time - arrival_time[i])) / burst_time[ i ] if response_ratio < temp: a__ =temp a__ =i # Calculate the turn around time a__ =current_time + burst_time[loc] - arrival_time[loc] current_time += burst_time[loc] # Indicates that the process has been performed. 
a__ =1 # Increase finished_process_count by 1 finished_process_count += 1 return turn_around_time def UpperCAmelCase__ ( _A : Dict , _A : Union[str, Any] , _A : Any , _A : Dict ): '''simple docstring''' a__ =[0] * no_of_process for i in range(0 , __a ): a__ =turn_around_time[i] - burst_time[i] return waiting_time if __name__ == "__main__": lowerCamelCase = 5 lowerCamelCase = ['''A''', '''B''', '''C''', '''D''', '''E'''] lowerCamelCase = [1, 2, 3, 4, 5] lowerCamelCase = [1, 2, 3, 4, 5] lowerCamelCase = calculate_turn_around_time( process_name, arrival_time, burst_time, no_of_process ) lowerCamelCase = calculate_waiting_time( process_name, turn_around_time, burst_time, no_of_process ) print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''') for i in range(0, no_of_process): print( f"""{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t""" f"""{turn_around_time[i]}\t\t\t{waiting_time[i]}""" ) print(f"""average waiting time : {mean(waiting_time):.5f}""") print(f"""average turn around time : {mean(turn_around_time):.5f}""")
188
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''', '''umberto-commoncrawl-cased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json''' ), '''umberto-wikipedia-uncased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json''' ), } class lowercase ( A__ ): """simple docstring""" _a = 'camembert' def __init__( self , UpperCamelCase_=30522 , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3072 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=2 , UpperCamelCase_=0.02 , UpperCamelCase_=1e-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ): '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) UpperCamelCase__ :int = vocab_size UpperCamelCase__ :Optional[int] = hidden_size UpperCamelCase__ :Optional[int] = num_hidden_layers UpperCamelCase__ :List[Any] = num_attention_heads UpperCamelCase__ :Union[str, Any] = hidden_act UpperCamelCase__ :List[Any] = intermediate_size UpperCamelCase__ :int = hidden_dropout_prob UpperCamelCase__ :Tuple = attention_probs_dropout_prob UpperCamelCase__ :Union[str, Any] = max_position_embeddings UpperCamelCase__ :Tuple = type_vocab_size UpperCamelCase__ :int = initializer_range UpperCamelCase__ :List[str] = layer_norm_eps UpperCamelCase__ :int = position_embedding_type UpperCamelCase__ :Any = use_cache UpperCamelCase__ :Any = classifier_dropout class lowercase ( 
A__ ): """simple docstring""" @property def lowerCAmelCase__ ( self ): '''simple docstring''' if self.task == "multiple-choice": UpperCamelCase__ :List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: UpperCamelCase__ :Tuple = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
97
0
"""Tests for the PyTorch LeViT model (modeling, classification heads, inference)."""

import inspect
import unittest
import warnings
from math import ceil, floor

from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        LevitForImageClassification,
        LevitForImageClassificationWithTeacher,
        LevitModel,
    )
    from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor


class LevitConfigTester(ConfigTester):
    """Config tester that additionally checks LeViT-specific attributes."""

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        # LeViT configs expose per-stage lists rather than single scalars.
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class LevitModelTester:
    """Builds small LeViT configs/inputs and checks model output shapes."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Two down-sampling ("Subsample") stages between the three blocks.
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels is None when unused."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # The patch embedding is four stride-2 convolutions; track the
        # resulting spatial resolution to predict the sequence length.
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-mixin tests for LeViT; attention/embedding tests are skipped
    because LeViT neither returns attentions nor exposes token embeddings."""

    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Covered by LevitConfigTester / the checks in test_config above.
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states

            # One hidden state per stage plus the embedding output.
            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            # Replay the four stride-2 patch-embedding convolutions.
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            # The teacher variant is inference-only and takes no labels.
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO cats fixture used across vision tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
282
"""Tests for the ConditionalDetr image processor (resizing, padding, COCO annotations)."""

import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ConditionalDetrImageProcessor


class ConditionalDetrImageProcessingTester(unittest.TestCase):
    """Holds image-processor hyper-parameters and shape oracles for the tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # By default, DETR-style processors resize the shortest edge to 18
        # (test-sized) while capping the longest edge at 1333.
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor is expected to produce.

        Mirrors shortest-edge resizing for a single image; for a batch,
        images are padded to the per-axis maximum across the batch.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                # Tensor/array inputs are channels-first: (C, H, W).
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        # Legacy kwargs (max_size, pad_and_return_pixel_mask) must still be honored.
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
97
0
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

lowerCamelCase : Dict =logging.get_logger(__name__)

# Map of pretrained checkpoint names to their hosted config files.
lowerCamelCase : List[Any] ={
    '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
    '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
    '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
    '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
    '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
    '''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}


# NOTE(review): both classes below are named `__a` — the second binding shadows the
# first at module scope. The base class `A__` is not defined in this view; presumably
# the first class derives from PretrainedConfig and the second from OnnxConfig
# (both imported above) — verify against the original module.
class __a ( A__ ):
    # model_type identifier used by the auto-config machinery
    _lowerCAmelCase : List[str] = '''roberta'''

    def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any]=5_02_65 , SCREAMING_SNAKE_CASE : Tuple=7_68 , SCREAMING_SNAKE_CASE : Any=12 , SCREAMING_SNAKE_CASE : str=12 , SCREAMING_SNAKE_CASE : List[str]=30_72 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : List[Any]=0.1 , SCREAMING_SNAKE_CASE : List[str]=5_12 , SCREAMING_SNAKE_CASE : int=2 , SCREAMING_SNAKE_CASE : int=0.0_2 , SCREAMING_SNAKE_CASE : Optional[int]=1e-1_2 , SCREAMING_SNAKE_CASE : Union[str, Any]=1 , SCREAMING_SNAKE_CASE : Optional[int]=0 , SCREAMING_SNAKE_CASE : List[Any]=2 , SCREAMING_SNAKE_CASE : List[Any]="absolute" , SCREAMING_SNAKE_CASE : Any=True , SCREAMING_SNAKE_CASE : List[Any]=None , **SCREAMING_SNAKE_CASE : Optional[Any] , ):
        '''Configuration holder for a RoBERTa-style model.

        Stores model hyper-parameters (vocab size, hidden size, layer/head counts,
        dropout probabilities, position-embedding settings, ...) on the instance
        and forwards the special-token ids plus any extra kwargs to the base
        configuration class.

        NOTE(review): every positional parameter was renamed to
        ``SCREAMING_SNAKE_CASE`` but the body reads the original names
        (``vocab_size``, ``hidden_size``, ...) — a renaming artifact to confirm
        against the original source.'''
        super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
        UpperCamelCase__ : Dict = vocab_size
        UpperCamelCase__ : List[str] = hidden_size
        UpperCamelCase__ : Any = num_hidden_layers
        UpperCamelCase__ : Dict = num_attention_heads
        UpperCamelCase__ : Any = hidden_act
        UpperCamelCase__ : Any = intermediate_size
        UpperCamelCase__ : List[Any] = hidden_dropout_prob
        UpperCamelCase__ : Optional[Any] = attention_probs_dropout_prob
        UpperCamelCase__ : Optional[Any] = max_position_embeddings
        UpperCamelCase__ : Tuple = type_vocab_size
        UpperCamelCase__ : Tuple = initializer_range
        UpperCamelCase__ : Any = layer_norm_eps
        UpperCamelCase__ : Tuple = position_embedding_type
        UpperCamelCase__ : Union[str, Any] = use_cache
        UpperCamelCase__ : List[str] = classifier_dropout


class __a ( A__ ):
    @property
    def __lowercase ( self : Optional[Any] ):
        '''Return the ONNX input axis mapping: for multiple-choice tasks the
        inputs carry a (batch, choice, sequence) layout, otherwise
        (batch, sequence). The same dynamic-axis dict is used for both
        ``input_ids`` and ``attention_mask``.

        NOTE(review): the axis dict is assigned to ``UpperCamelCase__`` but
        returned as ``dynamic_axis`` — renaming artifact, confirm.'''
        if self.task == "multiple-choice":
            UpperCamelCase__ : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            UpperCamelCase__ : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
189
"""Count the ways to assign N distinct tasks to M persons with bitmask DP.

Each person must end up with exactly one task, each task may be given to at
most one person, and person ``i`` may only receive a task listed in
``task_performed[i]``.  The DP state is (bitmask of persons already
assigned, next task number to consider).
"""
from collections import defaultdict


class AssignmentUsingBitmask:
    """Bitmask dynamic-programming solver for the task-assignment count.

    Bug fixes vs. the previous revision: the class and its methods are now
    named as their callers expect (``AssignmentUsingBitmask``,
    ``count_ways_until``, ``count_no_of_ways`` — previously the two methods
    shared one name, so the first silently vanished and the internal call to
    ``self.count_ways_until`` raised AttributeError), and ``count_no_of_ways``
    now records the *person index* for each task instead of appending the
    whole ``task_performed`` list.
    """

    def __init__(self, task_performed, total):
        # total no of tasks (N); tasks are numbered 1..N
        self.total_tasks = total
        # DP table of dimension (2^M) x (N+1), initialised to -1 (= not computed)
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]
        # stores the list of persons able to perform each task
        self.task = defaultdict(list)
        # final_mask has one bit set per person: reaching it means everyone
        # has been assigned a task
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        """Return the number of ways to hand out tasks ``task_no``..N so that
        the persons marked in ``mask`` grow to cover everyone."""
        # every person has a task: one complete, valid assignment
        if mask == self.final_mask:
            return 1
        # tasks exhausted before covering everyone: dead end
        if task_no > self.total_tasks:
            return 0
        # already memoised for this (mask, task) state
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # ways when this task is left unassigned
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # otherwise give the task to each capable, still-free person in turn
        if task_no in self.task:
            for p in self.task[task_no]:
                if mask & (1 << p):
                    # person p already holds a task
                    continue
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # memoise and return
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        """Index which persons can do each task, then run the DP.

        ``task_performed[i]`` lists the task numbers person ``i`` can do.
        The final answer corresponds to dp[0][1] (empty mask, first task).
        """
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                # fixed: store the person index i (was appending the whole list)
                self.task[j].append(i)
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
97
0
'''Create a "student" seq2seq model by copying alternating layers of a teacher model.

NOTE(review): this module was mechanically identifier-renamed and is currently
broken as written — every function is defined as ``A_`` (each definition
shadowing the previous), several functions declare duplicate ``snake_case``
parameters (a SyntaxError), and locals are assigned to ``SCREAMING_SNAKE_CASE``
but read under their original names (``dest_layers``, ``val``, ``teacher`` ...).
The comments below document the evident intent; restore the original
identifiers before running.'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union

import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging

A_ = logging.get_logger(__name__)


def A_ ( snake_case , snake_case , snake_case ):
    # Intent: copy the teacher layers selected by `layers_to_copy` from
    # `src_layers` into `dest_layers` (sizes must match), via state_dict.
    SCREAMING_SNAKE_CASE:Dict = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
    assert len(__a ) == len(__a ), F'''{len(__a )} != {len(__a )}'''
    dest_layers.load_state_dict(layers_to_copy.state_dict() )


A_ = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}

A_ = {
    # maps  num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}


def A_ ( snake_case , snake_case ):
    # Intent: look up the hardcoded teacher-layer list for this (teacher,
    # student) depth pair; if none exists, warn and default to the first
    # n_student layers.
    try:
        SCREAMING_SNAKE_CASE:List[str] = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                F'''no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'''
                F''' {n_student}''' )
        return list(range(__a ) )


def A_ ( snake_case , snake_case ):
    # Intent: return which teacher layers supervise each student layer during
    # intermediate-supervision distillation. The student can never be deeper
    # than the teacher; a 1-layer student is supervised by the last teacher layer.
    if n_student > n_teacher:
        raise ValueError(F'''Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}''' )
    elif n_teacher == n_student:
        return list(range(__a ) )
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]


def A_ ( snake_case , snake_case = "student" , snake_case = None , snake_case = None , snake_case=False , snake_case=None , snake_case=None , **snake_case , ):
    # Intent: build a smaller student seq2seq model from a teacher (model object
    # or checkpoint name) by copying e encoder layers and d decoder layers,
    # save it to save_path, and return (student, e_layers_to_copy, d_layers_to_copy).
    SCREAMING_SNAKE_CASE:Optional[int] = '''encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'''
    assert (e is not None) or (d is not None), _msg
    if isinstance(__a , __a ):
        AutoTokenizer.from_pretrained(__a ).save_pretrained(__a )  # purely for convenience
        SCREAMING_SNAKE_CASE:Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(__a ).eval()
    else:
        assert isinstance(__a , __a ), F'''teacher must be a model or string got type {type(__a )}'''
    SCREAMING_SNAKE_CASE:Optional[int] = teacher.config.to_diff_dict()
    # Work out the teacher's encoder/decoder depths; BART-style configs expose
    # encoder_layers/decoder_layers, T5-style configs raise AttributeError and
    # use num_layers / num_(en|de)coder_layers instead.
    try:
        SCREAMING_SNAKE_CASE:Union[str, Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            SCREAMING_SNAKE_CASE:Dict = teacher_e
        if d is None:
            SCREAMING_SNAKE_CASE:Tuple = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
    except AttributeError:  # T5
        if hasattr(teacher.config , "num_encoder_layers" ):
            SCREAMING_SNAKE_CASE:Dict = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            SCREAMING_SNAKE_CASE:int = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            SCREAMING_SNAKE_CASE:List[str] = teacher_e
        if d is None:
            SCREAMING_SNAKE_CASE:Dict = teacher_d
        if hasattr(teacher.config , "num_encoder_layers" ):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(__a )
    # Copy weights
    SCREAMING_SNAKE_CASE:Union[str, Any] = teacher.config_class(**__a )
    SCREAMING_SNAKE_CASE:str = AutoModelForSeqaSeqLM.from_config(__a )
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    SCREAMING_SNAKE_CASE:Tuple = student.load_state_dict(teacher.state_dict() , strict=__a )
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.
    if copy_first_teacher_layers:
        # Our copying is done. We just log and save
        SCREAMING_SNAKE_CASE:Any = list(range(__a ) ), list(range(__a ) )
        logger.info(
            F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'''
            F''' {save_path}''' )
        student.save_pretrained(__a )
        return student, e_layers_to_copy, d_layers_to_copy
    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        SCREAMING_SNAKE_CASE:List[int] = pick_layers_to_copy(__a , __a )
    if d_layers_to_copy is None:
        SCREAMING_SNAKE_CASE:List[int] = pick_layers_to_copy(__a , __a )
    try:
        if hasattr(
            __a , "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , __a )
            copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , __a )
        else:
            copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , __a )
            copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , __a )
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block , student.encoder.block , __a )
        copy_layers(teacher.decoder.block , student.decoder.block , __a )
    logger.info(
        F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''' )
    SCREAMING_SNAKE_CASE:Union[str, Any] = {
        '''teacher_type''': teacher.config.model_type,
        '''copied_encoder_layers''': e_layers_to_copy,
        '''copied_decoder_layers''': d_layers_to_copy,
    }
    student.save_pretrained(__a )
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy


if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
139
'''simple docstring''' import csv import tweepy # Twitter API credentials __snake_case = '''''' __snake_case = '''''' __snake_case = '''''' __snake_case = '''''' def a ( __a ) -> None: '''simple docstring''' UpperCamelCase__ :List[Any] = tweepy.OAuthHandler(__a , __a ) auth.set_access_token(__a , __a ) UpperCamelCase__ :List[str] = tweepy.API(__a ) # initialize a list to hold all the tweepy Tweets UpperCamelCase__ :Dict = [] # make initial request for most recent tweets (200 is the maximum allowed count) UpperCamelCase__ :Tuple = api.user_timeline(screen_name=__a , count=200 ) # save most recent tweets alltweets.extend(__a ) # save the id of the oldest tweet less one UpperCamelCase__ :Union[str, Any] = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(__a ) > 0: print(f'''getting tweets before {oldest}''' ) # all subsequent requests use the max_id param to prevent duplicates UpperCamelCase__ :Union[str, Any] = api.user_timeline( screen_name=__a , count=200 , max_id=__a ) # save most recent tweets alltweets.extend(__a ) # update the id of the oldest tweet less one UpperCamelCase__ :Tuple = alltweets[-1].id - 1 print(f'''...{len(__a )} tweets downloaded so far''' ) # transform the tweepy tweets into a 2D array that will populate the csv UpperCamelCase__ :int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(f'''new_{screen_name}_tweets.csv''' , '''w''' ) as f: UpperCamelCase__ :Tuple = csv.writer(__a ) writer.writerow(['''id''', '''created_at''', '''text'''] ) writer.writerows(__a ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets('''FirePing32''')
97
0
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging

A__ : Tuple = logging.get_logger(__name__)

# Map of pretrained checkpoint names to their hosted config files.
A__ : str = {
    '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
    # See all BART models at https://huggingface.co/models?filter=bart
}


# NOTE(review): this module carries mechanical-renaming damage — both classes are
# named `__snake_case` (the second shadows the first), the three class attributes
# below all bind `_a` (only the last survives), every method of the second class
# is named `UpperCAmelCase__` (only the last definition survives) while the bodies
# call the original method names (e.g. `self._generate_dummy_inputs_for_...`),
# `__init__` declares the parameter `A_` repeatedly (a SyntaxError), and locals
# are assigned to `lowerCAmelCase_` but read under their original names.
# The comments document the evident intent of the original code.
class __snake_case ( A__ ):
    _a = '''bart'''
    _a = ['''past_key_values''']
    _a = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__( self : str , A_ : Any=5_0_2_6_5 , A_ : Optional[Any]=1_0_2_4 , A_ : int=1_2 , A_ : Union[str, Any]=4_0_9_6 , A_ : Any=1_6 , A_ : List[str]=1_2 , A_ : Tuple=4_0_9_6 , A_ : str=1_6 , A_ : List[Any]=0.0 , A_ : List[str]=0.0 , A_ : str="gelu" , A_ : Optional[Any]=1_0_2_4 , A_ : str=0.1 , A_ : int=0.0 , A_ : Any=0.0 , A_ : Dict=0.02 , A_ : Optional[int]=0.0 , A_ : Any=False , A_ : Optional[int]=True , A_ : List[str]=3 , A_ : int=1 , A_ : List[str]=0 , A_ : Optional[Any]=2 , A_ : List[str]=True , A_ : int=2 , A_ : str=2 , **A_ : Dict , ):
        '''BART model configuration: stores architecture hyper-parameters on the
        instance, forwards token ids and encoder/decoder flags to the base
        config, and restores the legacy ``force_bos_token_to_be_generated``
        kwarg as ``forced_bos_token_id`` for old BART-CNN checkpoints.'''
        lowerCAmelCase_ : int = vocab_size
        lowerCAmelCase_ : List[str] = max_position_embeddings
        lowerCAmelCase_ : int = d_model
        lowerCAmelCase_ : Tuple = encoder_ffn_dim
        lowerCAmelCase_ : List[Any] = encoder_layers
        lowerCAmelCase_ : int = encoder_attention_heads
        lowerCAmelCase_ : Optional[Any] = decoder_ffn_dim
        lowerCAmelCase_ : Optional[int] = decoder_layers
        lowerCAmelCase_ : List[str] = decoder_attention_heads
        lowerCAmelCase_ : List[Any] = dropout
        lowerCAmelCase_ : Dict = attention_dropout
        lowerCAmelCase_ : Any = activation_dropout
        lowerCAmelCase_ : int = activation_function
        lowerCAmelCase_ : List[str] = init_std
        lowerCAmelCase_ : Optional[Any] = encoder_layerdrop
        lowerCAmelCase_ : int = decoder_layerdrop
        lowerCAmelCase_ : str = classifier_dropout
        lowerCAmelCase_ : str = use_cache
        lowerCAmelCase_ : Union[str, Any] = encoder_layers
        lowerCAmelCase_ : Optional[Any] = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=UpperCamelCase_ , pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , is_encoder_decoder=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , forced_eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , UpperCamelCase_):
            lowerCAmelCase_ : Optional[int] = self.bos_token_id
            warnings.warn(
                F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
                '''The config can simply be saved and uploaded again to be fixed.''')


class __snake_case ( A__ ):
    @property
    def UpperCAmelCase__ ( self : Dict):
        '''ONNX input axis mapping, depending on the export task: encoder inputs
        plus (past-aware) decoder inputs for seq2seq tasks, encoder-style
        inputs with per-layer past keys for causal-lm, and full
        encoder+decoder inputs otherwise.'''
        if self.task in ["default", "seq2seq-lm"]:
            lowerCAmelCase_ : Dict = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ])
            if self.use_past:
                # with cached past, the decoder consumes a single new token
                lowerCAmelCase_ : Optional[Any] = {0: '''batch'''}
                lowerCAmelCase_ : int = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                lowerCAmelCase_ : List[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
                lowerCAmelCase_ : int = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(UpperCamelCase_ , direction='''inputs''')
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            lowerCAmelCase_ : List[str] = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ])
            if self.use_past:
                lowerCAmelCase_ : Optional[Any] = self.num_layers
                for i in range(UpperCamelCase_):
                    lowerCAmelCase_ : Tuple = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    lowerCAmelCase_ : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            lowerCAmelCase_ : int = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ])
        return common_inputs

    @property
    def UpperCAmelCase__ ( self : int):
        '''ONNX output axis mapping; for non-seq2seq tasks with past enabled,
        each layer additionally exposes dynamically-shaped present key/values.'''
        if self.task in ["default", "seq2seq-lm"]:
            lowerCAmelCase_ : Optional[Any] = super().outputs
        else:
            lowerCAmelCase_ : List[str] = super(UpperCamelCase_ , self).outputs
            if self.use_past:
                lowerCAmelCase_ : Union[str, Any] = self.num_layers
                for i in range(UpperCamelCase_):
                    lowerCAmelCase_ : Any = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    lowerCAmelCase_ : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs

    def UpperCAmelCase__ ( self : Dict , A_ : Union[str, Any] , A_ : Optional[Any] = -1 , A_ : Tuple = -1 , A_ : str = False , A_ : Tuple = None , ):
        '''Build dummy encoder + decoder inputs for the default/seq2seq-lm export;
        when past is used, also fabricates zero-filled past_key_values tensors
        sized from the config's head count and hidden size.'''
        lowerCAmelCase_ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
        # Generate decoder inputs
        lowerCAmelCase_ : Optional[Any] = seq_length if not self.use_past else 1
        lowerCAmelCase_ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
        lowerCAmelCase_ : Optional[Any] = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
        lowerCAmelCase_ : int = dict(**UpperCamelCase_ , **UpperCamelCase_)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
            else:
                import torch
            lowerCAmelCase_ : List[Any] = common_inputs['''input_ids'''].shape
            lowerCAmelCase_ : Optional[int] = common_inputs['''decoder_input_ids'''].shape[1]
            lowerCAmelCase_ : Union[str, Any] = self.num_attention_heads
            lowerCAmelCase_ : Tuple = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            lowerCAmelCase_ : Any = decoder_seq_length + 3
            lowerCAmelCase_ : str = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            # extend the decoder mask to cover the fabricated past positions
            lowerCAmelCase_ : str = torch.cat(
                [common_inputs['''decoder_attention_mask'''], torch.ones(UpperCamelCase_ , UpperCamelCase_)] , dim=1)
            lowerCAmelCase_ : Union[str, Any] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            lowerCAmelCase_ : Any = self.num_layers
            lowerCAmelCase_ : Optional[int] = min(UpperCamelCase_ , UpperCamelCase_)
            lowerCAmelCase_ : Any = max(UpperCamelCase_ , UpperCamelCase_) - min_num_layers
            lowerCAmelCase_ : Tuple = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
            # shared layers get 4-tuples (decoder k/v + cross-attention k/v)
            for _ in range(UpperCamelCase_):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(UpperCamelCase_),
                        torch.zeros(UpperCamelCase_),
                        torch.zeros(UpperCamelCase_),
                        torch.zeros(UpperCamelCase_),
                    ))
            # TODO: test this.
            # remaining (deeper) side gets plain 2-tuples
            lowerCAmelCase_ : Any = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
            for _ in range(UpperCamelCase_ , UpperCamelCase_):
                common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase_), torch.zeros(UpperCamelCase_)))
        return common_inputs

    def UpperCAmelCase__ ( self : Tuple , A_ : Any , A_ : str = -1 , A_ : Tuple = -1 , A_ : str = False , A_ : Dict = None , ):
        '''Build dummy inputs for the causal-lm export; with past enabled, pads
        the attention mask and fabricates zero past key/values per layer.'''
        lowerCAmelCase_ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
            else:
                import torch
            lowerCAmelCase_ : List[str] = common_inputs['''input_ids'''].shape
            # Not using the same length for past_key_values
            lowerCAmelCase_ : List[str] = seqlen + 2
            lowerCAmelCase_ : Tuple = self.num_layers
            lowerCAmelCase_ : Any = self.num_attention_heads
            lowerCAmelCase_ : List[Any] = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            lowerCAmelCase_ : Optional[Any] = common_inputs['''attention_mask'''].dtype
            lowerCAmelCase_ : int = torch.cat(
                [common_inputs['''attention_mask'''], torch.ones(UpperCamelCase_ , UpperCamelCase_ , dtype=UpperCamelCase_)] , dim=1)
            lowerCAmelCase_ : Any = [
                (torch.zeros(UpperCamelCase_), torch.zeros(UpperCamelCase_)) for _ in range(UpperCamelCase_)
            ]
        return common_inputs

    def UpperCAmelCase__ ( self : List[str] , A_ : Any , A_ : Optional[int] = -1 , A_ : Dict = -1 , A_ : Dict = False , A_ : int = None , ):
        '''Build tokenizer-based dummy inputs for sequence-classification /
        question-answering exports, fixing the -1 (dynamic) axes to concrete
        batch/sequence sizes via compute_effective_axis_dimension.'''
        lowerCAmelCase_ : Tuple = compute_effective_axis_dimension(
            UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        lowerCAmelCase_ : Optional[Any] = tokenizer.num_special_tokens_to_add(UpperCamelCase_)
        lowerCAmelCase_ : Optional[int] = compute_effective_axis_dimension(
            UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase_)
        # Generate dummy inputs according to compute batch and sequence
        lowerCAmelCase_ : Tuple = [''' '''.join([tokenizer.unk_token]) * seq_length] * batch_size
        lowerCAmelCase_ : str = dict(tokenizer(UpperCamelCase_ , return_tensors=UpperCamelCase_))
        return common_inputs

    def UpperCAmelCase__ ( self : Union[str, Any] , A_ : int , A_ : Union[str, Any] = -1 , A_ : List[Any] = -1 , A_ : Union[str, Any] = False , A_ : List[Any] = None , ):
        '''Dispatch dummy-input generation to the task-specific helper.'''
        if self.task in ["default", "seq2seq-lm"]:
            lowerCAmelCase_ : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_)
        elif self.task == "causal-lm":
            lowerCAmelCase_ : Any = self._generate_dummy_inputs_for_causal_lm(
                UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_)
        else:
            lowerCAmelCase_ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_)
        return common_inputs

    def UpperCAmelCase__ ( self : Optional[Any] , A_ : List[Any] , A_ : str , A_ : Tuple , A_ : Any):
        '''Flatten nested past_key_values, delegating to the seq2seq base class
        for seq2seq tasks and to the with-past base class otherwise.'''
        if self.task in ["default", "seq2seq-lm"]:
            lowerCAmelCase_ : List[Any] = super()._flatten_past_key_values_(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
        else:
            lowerCAmelCase_ : Any = super(UpperCamelCase_ , self)._flatten_past_key_values_(
                UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
103
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() __snake_case = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) __snake_case = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""")) rename_keys.append( (F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( 
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append( ( F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""", F"""decoder.layers.{i}.encoder_attn.out_proj.weight""", ) ) rename_keys.append( ( F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""", F"""decoder.layers.{i}.encoder_attn.out_proj.bias""", ) ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""")) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ('''input_proj.weight''', '''input_projection.weight'''), ('''input_proj.bias''', 
'''input_projection.bias'''), ('''query_embed.weight''', '''query_position_embeddings.weight'''), ('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''), ('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''), ('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''), ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''), ('''class_embed.weight''', '''class_labels_classifier.weight'''), ('''class_embed.bias''', '''class_labels_classifier.bias'''), ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''), ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''), ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''), ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''), ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''), ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''), ] ) def a ( __a , __a , __a ) -> List[str]: '''simple docstring''' UpperCamelCase__ :List[Any] = state_dict.pop(__a ) UpperCamelCase__ :int = val def a ( __a ) -> Any: '''simple docstring''' UpperCamelCase__ :Tuple = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: UpperCamelCase__ :Dict = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' ) UpperCamelCase__ :List[str] = value else: UpperCamelCase__ :Dict = value return new_state_dict def a ( __a ) -> Optional[Any]: '''simple docstring''' UpperCamelCase__ :Optional[Any] = '''''' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) UpperCamelCase__ :Optional[Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) UpperCamelCase__ :str = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict 
UpperCamelCase__ :Any = in_proj_weight[:256, :] UpperCamelCase__ :Tuple = in_proj_bias[:256] UpperCamelCase__ :Optional[int] = in_proj_weight[256:512, :] UpperCamelCase__ :Optional[Any] = in_proj_bias[256:512] UpperCamelCase__ :Tuple = in_proj_weight[-256:, :] UpperCamelCase__ :Dict = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention UpperCamelCase__ :List[str] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' ) UpperCamelCase__ :Optional[Any] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict UpperCamelCase__ :Any = in_proj_weight[:256, :] UpperCamelCase__ :Optional[int] = in_proj_bias[:256] UpperCamelCase__ :Tuple = in_proj_weight[256:512, :] UpperCamelCase__ :Dict = in_proj_bias[256:512] UpperCamelCase__ :Any = in_proj_weight[-256:, :] UpperCamelCase__ :Dict = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention UpperCamelCase__ :List[str] = state_dict.pop( f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' ) UpperCamelCase__ :Any = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) of cross-attention to the state dict UpperCamelCase__ :Optional[Any] = in_proj_weight_cross_attn[:256, :] UpperCamelCase__ :Any = in_proj_bias_cross_attn[:256] UpperCamelCase__ :Any = in_proj_weight_cross_attn[256:512, :] UpperCamelCase__ :Dict = in_proj_bias_cross_attn[256:512] UpperCamelCase__ :str = in_proj_weight_cross_attn[-256:, :] UpperCamelCase__ :Tuple = in_proj_bias_cross_attn[-256:] def a ( __a , __a ) -> Optional[int]: '''simple docstring''' UpperCamelCase__ , UpperCamelCase__ :str = image.size UpperCamelCase__ :Optional[Any] 
= max(__a , __a ) UpperCamelCase__ :List[Any] = 800 if '''detection''' in checkpoint_url else 1000 UpperCamelCase__ :Dict = target_max_size / current_max_size UpperCamelCase__ :Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def a ( __a ) -> int: '''simple docstring''' UpperCamelCase__ :Any = F.to_tensor(__a ) UpperCamelCase__ :int = F.normalize(__a , mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ) return image @torch.no_grad() def a ( __a , __a , __a ) -> Dict: '''simple docstring''' logger.info('''Converting model...''' ) # load original state dict UpperCamelCase__ :Optional[Any] = torch.hub.load_state_dict_from_url(__a , map_location='''cpu''' ) # rename keys for src, dest in rename_keys: rename_key(__a , __a , __a ) UpperCamelCase__ :Any = rename_backbone_keys(__a ) # query, key and value matrices need special treatment read_in_q_k_v(__a ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them UpperCamelCase__ :Dict = '''model.''' for key in state_dict.copy().keys(): if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ): UpperCamelCase__ :Optional[Any] = state_dict.pop(__a ) UpperCamelCase__ :int = val # create HuggingFace model and load state dict UpperCamelCase__ :str = TableTransformerConfig( backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: UpperCamelCase__ :List[str] = 15 UpperCamelCase__ :int = 2 UpperCamelCase__ :Tuple = {0: '''table''', 1: '''table rotated'''} UpperCamelCase__ :int = idalabel UpperCamelCase__ :Dict = {v: k for k, v in idalabel.items()} else: UpperCamelCase__ :int = 125 UpperCamelCase__ :List[str] = 6 UpperCamelCase__ :Optional[Any] = { 0: 
'''table''', 1: '''table column''', 2: '''table row''', 3: '''table column header''', 4: '''table projected row header''', 5: '''table spanning cell''', } UpperCamelCase__ :Dict = idalabel UpperCamelCase__ :Optional[Any] = {v: k for k, v in idalabel.items()} UpperCamelCase__ :List[Any] = DetrImageProcessor( format='''coco_detection''' , max_size=800 if '''detection''' in checkpoint_url else 1000 ) UpperCamelCase__ :int = TableTransformerForObjectDetection(__a ) model.load_state_dict(__a ) model.eval() # verify our conversion UpperCamelCase__ :Dict = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png''' UpperCamelCase__ :Optional[Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=__a ) UpperCamelCase__ :Tuple = Image.open(__a ).convert('''RGB''' ) UpperCamelCase__ :int = normalize(resize(__a , __a ) ).unsqueeze(0 ) UpperCamelCase__ :Optional[int] = model(__a ) if "detection" in checkpoint_url: UpperCamelCase__ :Dict = (1, 15, 3) UpperCamelCase__ :List[Any] = torch.tensor( [[-6.7_8_9_7, -1_6.9_9_8_5, 6.7_9_3_7], [-8.0_1_8_6, -2_2.2_1_9_2, 6.9_6_7_7], [-7.3_1_1_7, -2_1.0_7_0_8, 7.4_0_5_5]] ) UpperCamelCase__ :Tuple = torch.tensor([[0.4_8_6_7, 0.1_7_6_7, 0.6_7_3_2], [0.6_7_1_8, 0.4_4_7_9, 0.3_8_3_0], [0.4_7_1_6, 0.1_7_6_0, 0.6_3_6_4]] ) else: UpperCamelCase__ :Optional[Any] = (1, 125, 7) UpperCamelCase__ :Dict = torch.tensor( [[-1_8.1_4_3_0, -8.3_2_1_4, 4.8_2_7_4], [-1_8.4_6_8_5, -7.1_3_6_1, -4.2_6_6_7], [-2_6.3_6_9_3, -9.3_4_2_9, -4.9_9_6_2]] ) UpperCamelCase__ :List[Any] = torch.tensor([[0.4_9_8_3, 0.5_5_9_5, 0.9_4_4_0], [0.4_9_1_6, 0.6_3_1_5, 0.5_9_5_4], [0.6_1_0_8, 0.8_6_3_7, 0.1_1_3_5]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , __a , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , __a , atol=1e-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f'''Saving 
PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(__a ).mkdir(exist_ok=__a ) model.save_pretrained(__a ) image_processor.save_pretrained(__a ) if push_to_hub: # Push model to HF hub logger.info('''Pushing model to the hub...''' ) UpperCamelCase__ :Union[str, Any] = ( '''microsoft/table-transformer-detection''' if '''detection''' in checkpoint_url else '''microsoft/table-transformer-structure-recognition''' ) model.push_to_hub(__a ) image_processor.push_to_hub(__a ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_url''', default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''', type=str, choices=[ '''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''', '''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''', ], help='''URL of the Table Transformer checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) __snake_case = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
97
0
"""Project Euler problem 39: find the perimeter <= 1000 with the most right-triangle solutions."""
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """
    Count, for every perimeter up to ``max_perimeter``, how many integer right
    triangles (base <= perpendicular) have that perimeter.

    Returns a Counter mapping perimeter -> number of triplets.
    """
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        # start at ``base`` so each unordered pair is counted once
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 10_00) -> int:
    """Return the perimeter <= ``max_perimeter`` admitting the most Pythagorean triplets."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(F'''Perimeter {solution()} has maximum solutions''')
250
"""Project Euler fraction-sum problem: sum all distinct reduced fractions x+y+z reachable
under the problem's n=1, n=2, n=-1, n=-2 relations for denominators up to ``order``."""
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    """Return True if ``number`` is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Return x + y + z as a reduced fraction ``(numerator, denominator)``."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """
    Collect every unique reduced sum x+y+z produced by the four relations, add them as
    exact Fractions, and return numerator + denominator of the total.
    """
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(F"""{solution() = }""")
97
0
"""BLIP-2 model configuration."""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING

logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}


class Blip2VisionConfig(PretrainedConfig):
    """Configuration for the BLIP-2 vision encoder."""

    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this sub-config, unwrapping it from a full Blip2Config if necessary."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2QFormerConfig(PretrainedConfig):
    """Configuration for the BLIP-2 Q-Former (querying transformer)."""

    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this sub-config, unwrapping it from a full Blip2Config if necessary."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2Config(PretrainedConfig):
    """Composite configuration holding vision, Q-Former and language-model sub-configs."""

    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        # the Q-Former cross-attends over the vision encoder's hidden states
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        """Instantiate a Blip2Config from the three sub-model configurations."""
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        """Serialize to a dict, expanding each nested sub-config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
255
"""Project Euler problem 40: digits of the Champernowne constant 0.123456789101112..."""


def solution() -> int:
    """
    Return d1 * d10 * d100 * d1000 * d10000 * d100000 * d1000000 where d_n is the
    n-th digit of the Champernowne constant's fractional part.
    """
    constant = []
    i = 1
    # append the decimal expansions of 1, 2, 3, ... (1e6 numbers is comfortably
    # more than 1e6 digits, so index 999999 is always valid)
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
97
0
"""Recursive Tower of Hanoi solver that prints each move."""


def move_tower(height, from_pole, to_pole, with_pole):
    """
    Move ``height`` disks from ``from_pole`` to ``to_pole`` using ``with_pole``
    as the spare, printing each individual disk move (2**height - 1 moves).
    """
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    """Print a single disk move from pole ``fp`` to pole ``tp``."""
    print("moving disk from", fp, "to", tp)


def main():
    """Read the tower height from stdin and solve the puzzle."""
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
102
"""Adjust the brightness of an image by a constant offset."""
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """
    Return a copy of ``img`` with ``level`` added to every pixel channel.

    ``level`` must lie in [-255.0, 255.0]; otherwise a ValueError is raised.
    """

    def brightness(c: int) -> float:
        # shift around the mid-point; simplifies to c + level
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
97
0
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging A__ : List[Any] = logging.get_logger(__name__) A__ : Dict = {'vocab_file': 'spiece.model'} A__ : Tuple = { 'vocab_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model', } } A__ : Optional[int] = { 'xlnet-base-cased': None, 'xlnet-large-cased': None, } # Segments (not really needed) A__ : Union[str, Any] = 0 A__ : Dict = 1 A__ : List[Any] = 2 A__ : str = 3 A__ : str = 4 class lowercase__ ( A__ ): _UpperCAmelCase :List[str] = VOCAB_FILES_NAMES _UpperCAmelCase :Tuple = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase :int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase :List[str] = "left" def __init__( self : Union[str, Any] , snake_case__ : Dict , snake_case__ : int=False , snake_case__ : Tuple=True , snake_case__ : List[Any]=False , snake_case__ : Any="<s>" , snake_case__ : Optional[Any]="</s>" , snake_case__ : int="<unk>" , snake_case__ : Optional[Any]="<sep>" , snake_case__ : Any="<pad>" , snake_case__ : List[str]="<cls>" , snake_case__ : str="<mask>" , snake_case__ : Tuple=["<eop>", "<eod>"] , snake_case__ : Optional[int] = None , **snake_case__ : Dict , ): lowerCamelCase_ : Dict =AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token lowerCamelCase_ : int ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=UpperCamelCase_ , remove_space=UpperCamelCase_ , keep_accents=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , 
cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) lowerCamelCase_ : List[Any] =3 lowerCamelCase_ : Any =do_lower_case lowerCamelCase_ : int =remove_space lowerCamelCase_ : List[Any] =keep_accents lowerCamelCase_ : Optional[int] =vocab_file lowerCamelCase_ : str =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCamelCase_ ) @property def UpperCAmelCase__ ( self : Any ): return len(self.sp_model ) def UpperCAmelCase__ ( self : Any ): lowerCamelCase_ : Dict ={self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : str ): lowerCamelCase_ : Optional[Any] =self.__dict__.copy() lowerCamelCase_ : List[Any] =None return state def __setstate__( self : Tuple , snake_case__ : Optional[int] ): lowerCamelCase_ : str =d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): lowerCamelCase_ : Optional[int] ={} lowerCamelCase_ : Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : Union[str, Any] ): if self.remove_space: lowerCamelCase_ : Tuple =''' '''.join(inputs.strip().split() ) else: lowerCamelCase_ : Tuple =inputs lowerCamelCase_ : str =outputs.replace("``" , "\"" ).replace("\'\'" , "\"" ) if not self.keep_accents: lowerCamelCase_ : Optional[Any] =unicodedata.normalize("NFKD" , UpperCamelCase_ ) lowerCamelCase_ : Any =''''''.join([c for c in outputs if not unicodedata.combining(UpperCamelCase_ )] ) if self.do_lower_case: lowerCamelCase_ : List[Any] =outputs.lower() return outputs def UpperCAmelCase__ ( self : Tuple , snake_case__ : List[Any] ): lowerCamelCase_ : List[str] =self.preprocess_text(UpperCamelCase_ ) lowerCamelCase_ : str =self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ ) 
lowerCamelCase_ : Optional[int] =[] for piece in pieces: if len(UpperCamelCase_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): lowerCamelCase_ : str =self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase_ , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: lowerCamelCase_ : Union[str, Any] =cur_pieces[1:] else: lowerCamelCase_ : Optional[Any] =cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(UpperCamelCase_ ) else: new_pieces.append(UpperCamelCase_ ) return new_pieces def UpperCAmelCase__ ( self : Tuple , snake_case__ : Union[str, Any] ): return self.sp_model.PieceToId(UpperCamelCase_ ) def UpperCAmelCase__ ( self : Dict , snake_case__ : Union[str, Any] ): return self.sp_model.IdToPiece(UpperCamelCase_ ) def UpperCAmelCase__ ( self : Any , snake_case__ : str ): lowerCamelCase_ : str =''''''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , " " ).strip() return out_string def UpperCAmelCase__ ( self : List[str] , snake_case__ : List[Any] , snake_case__ : str = False , snake_case__ : Optional[int] = None , snake_case__ : List[Any] = True , **snake_case__ : List[str] , ): lowerCamelCase_ : Optional[Any] =kwargs.pop("use_source_tokenizer" , UpperCamelCase_ ) lowerCamelCase_ : List[Any] =self.convert_ids_to_tokens(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 lowerCamelCase_ : Union[str, Any] =[] lowerCamelCase_ : List[str] =[] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(UpperCamelCase_ ) ) lowerCamelCase_ : List[str] =[] sub_texts.append(UpperCamelCase_ ) else: current_sub_text.append(UpperCamelCase_ ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(UpperCamelCase_ ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens lowerCamelCase_ : str =''''''.join(UpperCamelCase_ ) lowerCamelCase_ : int =( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: lowerCamelCase_ : List[Any] =self.clean_up_tokenization(UpperCamelCase_ ) return clean_text else: return text def UpperCAmelCase__ ( self : int , snake_case__ : Dict , snake_case__ : str = None ): lowerCamelCase_ : str =[self.sep_token_id] lowerCamelCase_ : Any =[self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def UpperCAmelCase__ ( self : Any , snake_case__ : List[str] , snake_case__ : Tuple = None , snake_case__ : str = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is not None: return ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] return ([0] * len(UpperCamelCase_ )) + [1, 1] def UpperCAmelCase__ ( self : Any , snake_case__ : List[str] , snake_case__ : str = None ): lowerCamelCase_ : Optional[int] =[self.sep_token_id] lowerCamelCase_ : List[str] =[2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return 
len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def UpperCAmelCase__ ( self : List[str] , snake_case__ : List[Any] , snake_case__ : str = None ): if not os.path.isdir(UpperCamelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCamelCase_ : Optional[int] =os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase_ , "wb" ) as fi: lowerCamelCase_ : Dict =self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_ ) return (out_vocab_file,)
144
"""Close or nag stale GitHub issues on huggingface/transformers."""
from datetime import datetime as dt
import os

from github import Github

# issues carrying any of these labels are never auto-closed or nagged
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    """Scan open issues: close those stale after a bot nag, nag the newly stale ones."""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
97
0
"""Tokenization class for SpeechT5 (character-level SentencePiece)."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}


class SpeechT5Tokenizer(PreTrainedTokenizer):
    """Character-level SentencePiece tokenizer used by all SpeechT5 checkpoints."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # the SentencePiece processor is not picklable; reload it in __setstate__
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by appending eos to a sequence (or to a concatenated pair)."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 for special tokens (the trailing eos) and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the SentencePiece model file into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
276
"""Sentence splitting helper used for ROUGE-Lsum scoring (PEGASUS/BART)."""
import re

from filelock import FileLock

try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split ``x`` into sentences, one per line, after stripping PEGASUS newline tokens."""
    # remove pegasus newline char; the original discarded re.sub's result, making it a no-op
    x = re.sub("<n>", "", x)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
97
0
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline

if __name__ == "__main__":
    # Build an UnCLIP image-variation pipeline by reusing the components of a
    # pretrained txt2img UnCLIP pipeline plus a CLIP image encoder.
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    # Fix: argparse stores "--txt2img_unclip" as args.txt2img_unclip; the
    # original read a misspelled attribute (args.txtaimg_unclip) and would
    # crash with AttributeError.
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
12
'''simple docstring'''
from pathlib import Path

import fire
from tqdm import tqdm


def a(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download a translation dataset and dump it as seq2seq text files.

    Writes one ``<split>.source`` / ``<split>.target`` pair per split
    (``validation`` is saved under the name ``val``).

    Args:
        src_lang: source-language code, e.g. ``"ro"``.
        tgt_lang: target-language code, e.g. ``"en"``.
        dataset: dataset name on the datasets hub, e.g. ``"wmt16"``.
        save_dir: output directory; defaults to ``"<dataset>-<src>-<tgt>"``.

    Raises:
        ImportError: if the ``datasets`` package is not installed.

    Fix: the original (mangled) signature reused one parameter name for all
    four arguments — a SyntaxError — and the ``__main__`` hook referenced an
    undefined function name; names are restored from how the values are used.
    """
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        # Fix: use context managers so the files are closed (the original
        # leaked both handles).
        with src_path.open("w+") as src_fp, tgt_path.open("w+") as tgt_fp:
            # reader is the bottleneck so writing one record at a time
            # doesn't slow things down
            for x in tqdm(ds[split]):
                ex = x["translation"]
                src_fp.write(ex[src_lang] + "\n")
                tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")


if __name__ == "__main__":
    fire.Fire(a)
97
0
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig):
    """Static SageMaker launch config used to exercise CLI-arg conversion.

    Fix: the mangled source defined this class under a different name than
    the one the test referenced, with an undefined base class.
    NOTE(review): field names are restored from accelerate's SageMakerConfig
    dataclass — confirm against that definition.
    """

    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    # Well-formed args: every flag is followed by an explicit value.
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    # Mixes bare flags with valued flags, which the converter must reject.
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        """_convert_nargs_to_dict must infer str/bool/int/float from values.

        Fix: the expected types were lost in the mangled source (every
        isinstance check compared against an undefined placeholder); they are
        restored from the literal argument values above ("False" -> bool,
        "3" -> int, "5e-5"/"50.5" -> float).
        """
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
188
'''simple docstring'''
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable

# Submodule name -> public names; extended below as optional backends are
# detected.  Fix: the mangled source rebound a single variable for every
# assignment and then passed an undefined `_import_structure` to
# _LazyModule; the dict-building form is restored.
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Image processors need the vision extra (PIL etc.).
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy
    # backends are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
97
0
def a_(set_a, set_b, alternative_union=False):
    """Return the Jaccard similarity of two collections.

    For sets: ``|A ∩ B| / |A ∪ B|``.  For lists/tuples the same ratio is
    computed order-preservingly (duplicates in ``set_a`` are counted).
    With ``alternative_union=True`` the denominator is ``|A| + |B|``.

    Returns ``None`` for unsupported input-type combinations (kept for
    backward compatibility with the original fall-through).

    Fix: the mangled signature reused one name for all three parameters
    (a SyntaxError), the body mixed two alias schemes, and two unreachable
    return statements were left behind; the __main__ hook also called an
    undefined name.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        # Order-preserving union: all of set_a, then the new items of set_b.
        union = set_a + [element for element in set_b if element not in set_a]
        return len(intersection) / len(union)
    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(a_(set_a, set_b))
282
'''simple docstring'''


def a(string_a, string_b) -> int:
    """Return the Hamming distance between two equal-length strings.

    Args:
        string_a: first string.
        string_b: second string, same length as ``string_a``.

    Raises:
        ValueError: if the two lengths differ.

    Fix: the mangled version reused a single name for both parameters and
    both loop variables, making the code a SyntaxError / a comparison of a
    character with itself.
    """
    if len(string_a) != len(string_b):
        raise ValueError("String lengths must match!")
    # Count positions where the characters disagree.
    return sum(1 for char_a, char_b in zip(string_a, string_b) if char_a != char_b)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
97
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Submodule name -> public names; extended below per available backend.
# Fix: the mangled source rebound a single variable for every assignment and
# then passed an undefined `_import_structure` to _LazyModule; the
# dict-building form is restored.
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy backends
    # are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
189
'''simple docstring'''


def a(upper_limit) -> "list[int]":
    """Return the Catalan numbers C(0)..C(upper_limit) via dynamic programming.

    Args:
        upper_limit: largest index to compute (inclusive), must be >= 0.

    Raises:
        ValueError: if ``upper_limit`` is negative.
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1.  (Fix: the mangled version rebound the
    # whole list variable to 1 instead of assigning elements 0 and 1, then
    # read an undefined name in the recurrence.)
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j)*C(i-j-1)), from j = 0 to i-1.
    # (Fix: the inner range must be i, not upper_limit, or the loop would
    # index past the filled prefix.)
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(a(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
97
0
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig

# Model id -> hosted config.json URL for the pretrained ALBERT checkpoints.
A_ = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    """Configuration for an ALBERT model; defaults match albert-xxlarge-v2.

    Fix: the mangled source reused one name for every __init__ parameter
    (a SyntaxError), gave both classes in this file the same colliding name,
    and used an undefined base class.  Parameter names are restored from the
    attribute assignments the body performs.
    """

    model_type = "albert"

    def __init__(
        self,
        vocab_size=30_000,
        embedding_size=128,
        hidden_size=4_096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16_384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        """Store the architecture hyper-parameters; extras go to the base class."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis names for the ONNX export inputs, per task."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
139
'''simple docstring'''
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


# NOTE(review): identifier obfuscation has damaged this test module: every
# top-level helper is named `a` with all parameters named `__a` (duplicate
# parameter names are a SyntaxError, and later defs shadow earlier ones),
# and several bodies read names the signatures no longer bind (dataset,
# expected_features, jsonl_path, tmp_path, keep_in_memory, split, path,
# _check_json_dataset, ...).  Code is left byte-identical; the comments
# below describe the visible intent of each test.


# Shared check: the dataset has 4 rows and columns col_1..col_3 with the
# expected dtypes.
def a(__a, __a) -> Optional[int]:
    '''simple docstring'''
    assert isinstance(__a, __a)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


# Reading a JSON-lines file into a Dataset, with/without keep_in_memory,
# while watching Arrow memory usage.
@pytest.mark.parametrize('''keep_in_memory''', [False, True])
def a(__a, __a, __a) -> Optional[Any]:
    '''simple docstring'''
    UpperCamelCase__: Union[str, Any] = tmp_path / '''cache'''
    UpperCamelCase__: Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        UpperCamelCase__: Tuple = JsonDatasetReader(__a, cache_dir=__a, keep_in_memory=__a).read()
    _check_json_dataset(__a, __a)


# Reading with an explicit Features schema (or None to infer it).
@pytest.mark.parametrize(
    '''features''',
    [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ],
)
def a(__a, __a, __a) -> Any:
    '''simple docstring'''
    UpperCamelCase__: Union[str, Any] = tmp_path / '''cache'''
    UpperCamelCase__: Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    UpperCamelCase__: Optional[Any] = features.copy() if features else default_expected_features
    UpperCamelCase__: Tuple = (
        Features({feature: Value(__a) for feature, dtype in features.items()}) if features is not None else None
    )
    UpperCamelCase__: int = JsonDatasetReader(__a, features=__a, cache_dir=__a).read()
    _check_json_dataset(__a, __a)


# Reading when the schema lists the columns in a different order than the file.
@pytest.mark.parametrize(
    '''features''',
    [
        None,
        {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
    ],
)
def a(__a, __a, __a) -> Tuple:
    '''simple docstring'''
    UpperCamelCase__: int = tmp_path / '''cache'''
    UpperCamelCase__: str = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
    UpperCamelCase__: Any = features.copy() if features else default_expected_features
    UpperCamelCase__: Union[str, Any] = (
        Features({feature: Value(__a) for feature, dtype in features.items()}) if features is not None else None
    )
    UpperCamelCase__: Any = JsonDatasetReader(__a, features=__a, cache_dir=__a).read()
    assert isinstance(__a, __a)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


# Casting a mistyped file to the requested schema keeps the requested
# column order.
def a(__a, __a) -> List[Any]:
    '''simple docstring'''
    UpperCamelCase__: Any = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
    UpperCamelCase__: int = features.copy()
    UpperCamelCase__: List[Any] = (
        Features({feature: Value(__a) for feature, dtype in features.items()}) if features is not None else None
    )
    UpperCamelCase__: Optional[int] = tmp_path / '''cache'''
    UpperCamelCase__: Dict = JsonDatasetReader(__a, features=__a, cache_dir=__a).read()
    assert isinstance(__a, __a)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


# The requested split name is honored (default "train").
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train'''), '''train''', '''test'''])
def a(__a, __a, __a) -> List[Any]:
    '''simple docstring'''
    UpperCamelCase__: Union[str, Any] = tmp_path / '''cache'''
    UpperCamelCase__: Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    UpperCamelCase__: List[Any] = JsonDatasetReader(__a, cache_dir=__a, split=__a).read()
    _check_json_dataset(__a, __a)
    assert dataset.split == split if split else "train"


# A single path or a list of paths is accepted as input.
@pytest.mark.parametrize('''path_type''', [str, list])
def a(__a, __a, __a) -> Any:
    '''simple docstring'''
    if issubclass(__a, __a):
        UpperCamelCase__: Union[str, Any] = jsonl_path
    elif issubclass(__a, __a):
        UpperCamelCase__: int = [jsonl_path]
    UpperCamelCase__: Dict = tmp_path / '''cache'''
    UpperCamelCase__: Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    UpperCamelCase__: List[str] = JsonDatasetReader(__a, cache_dir=__a).read()
    _check_json_dataset(__a, __a)


# Shared check for DatasetDict results, one sub-dataset per split.
def a(__a, __a, __a=("train",)) -> Optional[Any]:
    '''simple docstring'''
    assert isinstance(__a, __a)
    for split in splits:
        UpperCamelCase__: Optional[int] = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


# Reading {"train": path} into a DatasetDict, with/without keep_in_memory.
@pytest.mark.parametrize('''keep_in_memory''', [False, True])
def a(__a, __a, __a) -> List[str]:
    '''simple docstring'''
    UpperCamelCase__: List[str] = tmp_path / '''cache'''
    UpperCamelCase__: Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        UpperCamelCase__: str = JsonDatasetReader({'''train''': jsonl_path}, cache_dir=__a, keep_in_memory=__a).read()
    _check_json_datasetdict(__a, __a)


# DatasetDict reading with an explicit Features schema.
@pytest.mark.parametrize(
    '''features''',
    [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ],
)
def a(__a, __a, __a) -> int:
    '''simple docstring'''
    UpperCamelCase__: Tuple = tmp_path / '''cache'''
    UpperCamelCase__: Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    UpperCamelCase__: Optional[int] = features.copy() if features else default_expected_features
    UpperCamelCase__: str = (
        Features({feature: Value(__a) for feature, dtype in features.items()}) if features is not None else None
    )
    UpperCamelCase__: Dict = JsonDatasetReader({'''train''': jsonl_path}, features=__a, cache_dir=__a).read()
    _check_json_datasetdict(__a, __a)


# DatasetDict split handling: one explicit split, or default train/test.
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train'''), '''train''', '''test'''])
def a(__a, __a, __a) -> str:
    '''simple docstring'''
    if split:
        UpperCamelCase__: List[str] = {split: jsonl_path}
    else:
        UpperCamelCase__: int = '''train'''
        UpperCamelCase__: int = {'''train''': jsonl_path, '''test''': jsonl_path}
    UpperCamelCase__: Any = tmp_path / '''cache'''
    UpperCamelCase__: Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    UpperCamelCase__: Any = JsonDatasetReader(__a, cache_dir=__a).read()
    _check_json_datasetdict(__a, __a, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


# Decoders for what JsonDatasetWriter produced: a single JSON document...
def a(__a) -> Union[str, Any]:
    '''simple docstring'''
    return json.load(__a)


# ...or JSON-lines (one JSON object per line).
def a(__a) -> int:
    '''simple docstring'''
    return [json.loads(__a) for line in buffer]


class lowercase:
    """simple docstring"""

    # Writer round-trip: JSON-lines vs a single JSON document; 10 records
    # are expected back.
    @pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)])
    def lowerCAmelCase__(self, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_):
        '''simple docstring'''
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase_, UpperCamelCase_, lines=UpperCamelCase_).write()
            buffer.seek(0)
            UpperCamelCase__: List[Any] = load_json_function(UpperCamelCase_)
        assert isinstance(UpperCamelCase_, UpperCamelCase_)
        assert isinstance(exported_content[0], UpperCamelCase_)
        assert len(UpperCamelCase_) == 10

    # Writer `orient` variants and the container shape each one produces.
    @pytest.mark.parametrize(
        '''orient, container, keys, len_at''',
        [
            ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
            ('''split''', dict, {'''columns''', '''data'''}, '''data'''),
            ('''index''', dict, set('''0123456789'''), None),
            ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
            ('''values''', list, None, None),
            ('''table''', dict, {'''schema''', '''data'''}, '''data'''),
        ],
    )
    def lowerCAmelCase__(self, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_):
        '''simple docstring'''
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase_, UpperCamelCase_, lines=UpperCamelCase_, orient=UpperCamelCase_).write()
            buffer.seek(0)
            UpperCamelCase__: Optional[int] = load_json(UpperCamelCase_)
        assert isinstance(UpperCamelCase_, UpperCamelCase_)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(UpperCamelCase_, '''keys''') and not hasattr(exported_content[0], '''keys''')
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(UpperCamelCase_) == 10

    # Same round-trip, exercising the multiprocessing path (num_proc=2).
    @pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)])
    def lowerCAmelCase__(self, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_):
        '''simple docstring'''
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase_, UpperCamelCase_, lines=UpperCamelCase_, num_proc=2).write()
            buffer.seek(0)
            UpperCamelCase__: Union[str, Any] = load_json_function(UpperCamelCase_)
        assert isinstance(UpperCamelCase_, UpperCamelCase_)
        assert isinstance(exported_content[0], UpperCamelCase_)
        assert len(UpperCamelCase_) == 10

    # `orient` variants under multiprocessing (num_proc=2).
    @pytest.mark.parametrize(
        '''orient, container, keys, len_at''',
        [
            ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
            ('''split''', dict, {'''columns''', '''data'''}, '''data'''),
            ('''index''', dict, set('''0123456789'''), None),
            ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
            ('''values''', list, None, None),
            ('''table''', dict, {'''schema''', '''data'''}, '''data'''),
        ],
    )
    def lowerCAmelCase__(self, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_):
        '''simple docstring'''
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase_, UpperCamelCase_, lines=UpperCamelCase_, orient=UpperCamelCase_, num_proc=2).write()
            buffer.seek(0)
            UpperCamelCase__: int = load_json(UpperCamelCase_)
        assert isinstance(UpperCamelCase_, UpperCamelCase_)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(UpperCamelCase_, '''keys''') and not hasattr(exported_content[0], '''keys''')
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(UpperCamelCase_) == 10

    # num_proc=0 must be rejected by the writer constructor.
    def lowerCAmelCase__(self, UpperCamelCase_):
        '''simple docstring'''
        with pytest.raises(UpperCamelCase_):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(UpperCamelCase_, UpperCamelCase_, num_proc=0)

    # Compressed output must match a reference compressed fixture file.
    @pytest.mark.parametrize('''compression, extension''', [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')])
    def lowerCAmelCase__(self, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_):
        '''simple docstring'''
        UpperCamelCase__: Tuple = tmp_path_factory.mktemp('''data''') / F'''test.json.{extension}'''
        UpperCamelCase__: Union[str, Any] = str(shared_datadir / F'''test_file.json.{extension}''')
        JsonDatasetWriter(UpperCamelCase_, UpperCamelCase_, compression=UpperCamelCase_).write()
        with fsspec.open(UpperCamelCase_, '''rb''', compression='''infer''') as f:
            UpperCamelCase__: Dict = f.read()
        with fsspec.open(UpperCamelCase_, '''rb''', compression='''infer''') as f:
            UpperCamelCase__: int = f.read()
        assert exported_content == original_content
97
0
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging

if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType

# Fix: the mangled source bound both the logger and the archive map to the
# same reused name; distinct names are restored.
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMvaConfig(PretrainedConfig):
    """Configuration for a LayoutLMv3 model (text + 2D layout + image).

    Fix: the mangled signature reused parameter names (a SyntaxError) and an
    undefined base class; parameter names are restored from the attribute
    assignments in the body.
    """

    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1_024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        """Store the text/layout/vision hyper-parameters; extras go to the base."""
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMvaOnnxConfig(OnnxConfig):
    # Minimum torch version that supports the ops used by this export.
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis names for the ONNX export inputs, per task."""
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating the exported model's outputs.
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor,
        batch_size=-1,
        seq_length=-1,
        is_pair=False,
        framework=None,
        num_channels=3,
        image_width=40,
        image_height=40,
    ):
        """Build dummy (text, bbox, image) inputs for tracing the ONNX export.

        NOTE(review): argument routing (apply_ocr=False, is_pair into
        num_special_tokens_to_add, the image-dims order) was lost in the
        mangled source and restored from the surrounding comments — confirm.
        """
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )
        return inputs
103
'''simple docstring'''
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig):
    """Static SageMaker launch config used to exercise CLI-arg conversion.

    Fix: the mangled source defined this class under a different name than
    the one the test body referenced, with an undefined base class.
    NOTE(review): field names are restored from accelerate's SageMakerConfig
    dataclass — confirm against that definition.
    """

    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    # Well-formed args: every flag is followed by an explicit value.
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    # Mixes bare flags with valued flags, which the converter must reject.
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        """_convert_nargs_to_dict must infer str/bool/int/float from values.

        Fix: the expected types were undefined placeholders in the mangled
        source; they are restored from the literal argument values above
        ("False" -> bool, "3" -> int, "5e-5"/"50.5" -> float).
        """
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
97
0
'''simple docstring''' import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class a__ : def __init__( self , _UpperCamelCase , _UpperCamelCase=2 , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=2 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=36 , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.0_2 , _UpperCamelCase=6 , _UpperCamelCase=6 , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=None , _UpperCamelCase=1000 , ): """simple docstring""" _lowercase : List[str] = parent _lowercase : Dict = batch_size _lowercase : List[Any] = num_channels _lowercase : int = image_size _lowercase : str = patch_size _lowercase : Optional[Any] = text_seq_length _lowercase : str = is_training _lowercase : List[str] = use_input_mask _lowercase : Any = 
use_token_type_ids _lowercase : List[Any] = use_labels _lowercase : List[Any] = vocab_size _lowercase : List[Any] = hidden_size _lowercase : Any = num_hidden_layers _lowercase : List[str] = num_attention_heads _lowercase : Tuple = intermediate_size _lowercase : Any = hidden_act _lowercase : Optional[int] = hidden_dropout_prob _lowercase : Optional[int] = attention_probs_dropout_prob _lowercase : List[Any] = max_position_embeddings _lowercase : Dict = type_vocab_size _lowercase : Optional[int] = type_sequence_label_size _lowercase : List[Any] = initializer_range _lowercase : Any = coordinate_size _lowercase : Optional[Any] = shape_size _lowercase : Any = num_labels _lowercase : Union[str, Any] = num_choices _lowercase : Any = scope _lowercase : List[Any] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) _lowercase : Optional[int] = text_seq_length _lowercase : str = (image_size // patch_size) ** 2 + 1 _lowercase : Optional[int] = self.text_seq_length + self.image_seq_length def _lowerCamelCase ( self ): """simple docstring""" _lowercase : str = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) _lowercase : Any = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: _lowercase : int = bbox[i, j, 3] _lowercase : Union[str, Any] = bbox[i, j, 1] _lowercase : List[str] = t if bbox[i, j, 2] < bbox[i, j, 0]: _lowercase : List[str] = bbox[i, j, 2] _lowercase : Union[str, Any] = bbox[i, j, 0] _lowercase : List[Any] = t _lowercase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowercase : List[Any] = None if self.use_input_mask: _lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.text_seq_length] ) _lowercase : Any = None if self.use_token_type_ids: _lowercase : 
int = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) _lowercase : Union[str, Any] = None _lowercase : Tuple = None if self.use_labels: _lowercase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowercase : str = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) _lowercase : Tuple = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" _lowercase : Union[str, Any] = LayoutLMvaModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() # text + image _lowercase : List[str] = model(UpperCamelCase_ , pixel_values=UpperCamelCase_ ) _lowercase : int = model( UpperCamelCase_ , bbox=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) _lowercase : Union[str, Any] = model(UpperCamelCase_ , bbox=UpperCamelCase_ , pixel_values=UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) _lowercase : str = model(UpperCamelCase_ , bbox=UpperCamelCase_ , pixel_values=UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, 
self.hidden_size) ) # text only _lowercase : Dict = model(UpperCamelCase_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only _lowercase : List[str] = model(pixel_values=UpperCamelCase_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" _lowercase : Optional[Any] = self.num_labels _lowercase : Tuple = LayoutLMvaForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() _lowercase : int = model( UpperCamelCase_ , bbox=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" _lowercase : Union[str, Any] = self.num_labels _lowercase : int = LayoutLMvaForTokenClassification(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() _lowercase : Tuple = model( UpperCamelCase_ , bbox=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" _lowercase : Optional[int] = LayoutLMvaForQuestionAnswering(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() 
_lowercase : int = model( UpperCamelCase_ , bbox=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowerCamelCase ( self ): """simple docstring""" _lowercase : Tuple = self.prepare_config_and_inputs() ( _lowercase ) : Any = config_and_inputs _lowercase : Optional[int] = { '''input_ids''': input_ids, '''bbox''': bbox, '''pixel_values''': pixel_values, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class a__ ( A__ , A__ , unittest.TestCase ): _SCREAMING_SNAKE_CASE : str = False _SCREAMING_SNAKE_CASE : Tuple = False _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : Optional[int] = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE : List[str] = ( {'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel} if is_torch_available() else {} ) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" return True def _lowerCamelCase ( self ): """simple docstring""" _lowercase : str = LayoutLMvaModelTester(self ) _lowercase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 ) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ): """simple docstring""" _lowercase : int = copy.deepcopy(UpperCamelCase_ ) if model_class in get_values(UpperCamelCase_ ): _lowercase : Dict = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if 
isinstance(UpperCamelCase_ , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(UpperCamelCase_ ): _lowercase : Optional[int] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ ) elif model_class in get_values(UpperCamelCase_ ): _lowercase : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ ) _lowercase : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ ) elif model_class in [ *get_values(UpperCamelCase_ ), ]: _lowercase : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ ) elif model_class in [ *get_values(UpperCamelCase_ ), ]: _lowercase : int = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCamelCase_ , ) return inputs_dict def _lowerCamelCase ( self ): """simple docstring""" self.config_tester.run_common_tests() def _lowerCamelCase ( self ): """simple docstring""" _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def _lowerCamelCase ( self ): """simple docstring""" _lowercase : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _lowercase : Optional[Any] = type self.model_tester.create_and_check_model(*UpperCamelCase_ ) def _lowerCamelCase ( self ): """simple docstring""" _lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase_ ) def _lowerCamelCase ( self ): """simple docstring""" _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase_ ) def _lowerCamelCase ( self ): """simple docstring""" _lowercase : Optional[Any] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase_ ) @slow def _lowerCamelCase ( self ): """simple docstring""" for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase : Optional[Any] = LayoutLMvaModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) def _A ( ) -> str: _lowercase : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch class a__ ( unittest.TestCase ): @cached_property def _lowerCamelCase ( self ): """simple docstring""" return LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase_ ) if is_vision_available() else None @slow def _lowerCamelCase ( self ): """simple docstring""" _lowercase : List[Any] = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(UpperCamelCase_ ) _lowercase : List[str] = self.default_image_processor _lowercase : Optional[Any] = prepare_img() _lowercase : List[str] = image_processor(images=UpperCamelCase_ , return_tensors="pt" ).pixel_values.to(UpperCamelCase_ ) _lowercase : List[str] = torch.tensor([[1, 2]] ) _lowercase : Optional[Any] = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass _lowercase : str = model( input_ids=input_ids.to(UpperCamelCase_ ) , bbox=bbox.to(UpperCamelCase_ ) , pixel_values=pixel_values.to(UpperCamelCase_ ) , ) # verify the logits _lowercase : Tuple = torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape , UpperCamelCase_ ) _lowercase : Tuple = torch.tensor( [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(UpperCamelCase_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
250
'''simple docstring''' from datetime import datetime import matplotlib.pyplot as plt import torch def a ( __a ) -> int: '''simple docstring''' for param in module.parameters(): UpperCamelCase__ :Dict = False def a ( ) -> Union[str, Any]: '''simple docstring''' UpperCamelCase__ :List[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu''' if torch.backends.mps.is_available() and torch.backends.mps.is_built(): UpperCamelCase__ :Optional[int] = '''mps''' if device == "mps": print( '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch''' ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues''' ''' with generations.''' ) return device def a ( __a ) -> Any: '''simple docstring''' UpperCamelCase__ :Dict = plt.imshow(__a ) fig.axes.get_xaxis().set_visible(__a ) fig.axes.get_yaxis().set_visible(__a ) plt.show() def a ( ) -> str: '''simple docstring''' UpperCamelCase__ :int = datetime.now() UpperCamelCase__ :str = current_time.strftime('''%H:%M:%S''' ) return timestamp
97
0
"""simple docstring""" from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax _UpperCamelCase: str = logging.get_logger(__name__) @add_end_docstrings(A__ ) class a__ ( A__ ): def __init__( self : Optional[Any], **lowerCAmelCase : Tuple ) -> Dict: super().__init__(**UpperCamelCase_ ) requires_backends(self, 'vision' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : int, lowerCAmelCase : str, **lowerCAmelCase : str ) -> List[Any]: return super().__call__(UpperCamelCase_, **UpperCamelCase_ ) def lowercase ( self : Any, **lowerCAmelCase : str ) -> Tuple: lowercase : Union[str, Any] = {} if "candidate_labels" in kwargs: lowercase : int = kwargs['''candidate_labels'''] if "hypothesis_template" in kwargs: lowercase : Dict = kwargs['''hypothesis_template'''] return preprocess_params, {}, {} def lowercase ( self : int, lowerCAmelCase : List[str], lowerCAmelCase : Dict=None, lowerCAmelCase : Union[str, Any]="This is a photo of {}." 
) -> Tuple: lowercase : List[Any] = load_image(UpperCamelCase_ ) lowercase : List[Any] = self.image_processor(images=[image], return_tensors=self.framework ) lowercase : List[Any] = candidate_labels lowercase : Optional[int] = [hypothesis_template.format(UpperCamelCase_ ) for x in candidate_labels] lowercase : List[Any] = self.tokenizer(UpperCamelCase_, return_tensors=self.framework, padding=UpperCamelCase_ ) lowercase : Any = [text_inputs] return inputs def lowercase ( self : Any, lowerCAmelCase : int ) -> str: lowercase : Optional[int] = model_inputs.pop('candidate_labels' ) lowercase : Any = model_inputs.pop('text_inputs' ) if isinstance(text_inputs[0], UpperCamelCase_ ): lowercase : Dict = text_inputs[0] else: # Batching case. lowercase : Any = text_inputs[0][0] lowercase : str = self.model(**UpperCamelCase_, **UpperCamelCase_ ) lowercase : Optional[Any] = { '''candidate_labels''': candidate_labels, '''logits''': outputs.logits_per_image, } return model_outputs def lowercase ( self : List[str], lowerCAmelCase : List[Any] ) -> Optional[Any]: lowercase : Optional[int] = model_outputs.pop('candidate_labels' ) lowercase : List[Any] = model_outputs['''logits'''][0] if self.framework == "pt": lowercase : Tuple = logits.softmax(dim=-1 ).squeeze(-1 ) lowercase : str = probs.tolist() if not isinstance(UpperCamelCase_, UpperCamelCase_ ): lowercase : Optional[int] = [scores] elif self.framework == "tf": lowercase : Any = stable_softmax(UpperCamelCase_, axis=-1 ) lowercase : Tuple = probs.numpy().tolist() else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) lowercase : str = [ {'''score''': score, '''label''': candidate_label} for score, candidate_label in sorted(zip(UpperCamelCase_, UpperCamelCase_ ), key=lambda lowerCAmelCase : -x[0] ) ] return result
255
'''simple docstring''' from scipy.stats import pearsonr import datasets __snake_case = ''' Pearson correlation coefficient and p-value for testing non-correlation. The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. ''' __snake_case = ''' Args: predictions (`list` of `int`): Predicted class labels, as returned by a model. references (`list` of `int`): Ground truth labels. return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`. Returns: pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation. p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities. Examples: Example 1-A simple example using only predictions and references. 
>>> pearsonr_metric = datasets.load_metric("pearsonr") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5]) >>> print(round(results[\'pearsonr\'], 2)) -0.74 Example 2-The same as Example 1, but that also returns the `p-value`. >>> pearsonr_metric = datasets.load_metric("pearsonr") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True) >>> print(sorted(list(results.keys()))) [\'p-value\', \'pearsonr\'] >>> print(round(results[\'pearsonr\'], 2)) -0.74 >>> print(round(results[\'p-value\'], 2)) 0.15 ''' __snake_case = ''' @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, Ilhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Antonio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase ( datasets.Metric ): """simple docstring""" def lowerCAmelCase__ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ): '''simple docstring''' if return_pvalue: UpperCamelCase__ :Any = pearsonr(UpperCamelCase_ , UpperCamelCase_ ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(UpperCamelCase_ , UpperCamelCase_ )[0] )}
97
0
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, PNDMScheduler, StableDiffusionLDMaDPipeline, UNetaDConditionModel, ) from diffusers.utils import nightly, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS enable_full_determinism() class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =StableDiffusionLDMaDPipeline lowerCamelCase__ =TEXT_TO_IMAGE_PARAMS lowerCamelCase__ =TEXT_TO_IMAGE_BATCH_PARAMS lowerCamelCase__ =TEXT_TO_IMAGE_IMAGE_PARAMS def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' torch.manual_seed(0 ) __snake_case : int = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) __snake_case : Tuple = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , ) torch.manual_seed(0 ) __snake_case : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) __snake_case : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) __snake_case : Optional[int] = CLIPTextModel(UpperCamelCase_ ) __snake_case : str = 
CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __snake_case : Any = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def SCREAMING_SNAKE_CASE (self , a_ , a_=0 ): '''simple docstring''' if str(UpperCamelCase_ ).startswith('''mps''' ): __snake_case : Dict = torch.manual_seed(UpperCamelCase_ ) else: __snake_case : List[str] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) __snake_case : List[str] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator __snake_case : List[Any] = self.get_dummy_components() __snake_case : Any = StableDiffusionLDMaDPipeline(**UpperCamelCase_ ) __snake_case : str = ldmad_pipe.to(UpperCamelCase_ ) ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __snake_case : int = self.get_dummy_inputs(UpperCamelCase_ ) __snake_case : str = ldmad_pipe(**UpperCamelCase_ ) __snake_case : Union[str, Any] = output.rgb, output.depth __snake_case : List[Any] = rgb[0, -3:, -3:, -1] __snake_case : Optional[int] = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) __snake_case : Any = np.array( [0.3733_8176, 0.7_0247, 0.7420_3193, 0.5164_3604, 0.5825_6793, 0.6093_2136, 0.418_1095, 0.4835_5877, 0.4653_5262] ) __snake_case : Optional[int] = np.array([103.4_6727, 85.81_2004, 87.84_9236] ) assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2 assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2 def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : 
Optional[int] = self.get_dummy_components() __snake_case : Optional[Any] = StableDiffusionLDMaDPipeline(**UpperCamelCase_ ) __snake_case : Dict = ldmad_pipe.to(UpperCamelCase_ ) ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __snake_case : str = self.get_dummy_inputs(UpperCamelCase_ ) __snake_case : Optional[Any] = 3 * [inputs['''prompt''']] # forward __snake_case : int = ldmad_pipe(**UpperCamelCase_ ) __snake_case : Tuple = output.rgb, output.depth __snake_case : Union[str, Any] = rgb_slice_a[0, -3:, -3:, -1] __snake_case : Union[str, Any] = depth_slice_a[0, -3:, -1] __snake_case : Dict = self.get_dummy_inputs(UpperCamelCase_ ) __snake_case : Optional[Any] = 3 * [inputs.pop('''prompt''' )] __snake_case : str = ldmad_pipe.tokenizer( UpperCamelCase_ , padding='''max_length''' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=UpperCamelCase_ , return_tensors='''pt''' , ) __snake_case : Optional[Any] = text_inputs['''input_ids'''].to(UpperCamelCase_ ) __snake_case : str = ldmad_pipe.text_encoder(UpperCamelCase_ )[0] __snake_case : Optional[int] = prompt_embeds # forward __snake_case : Tuple = ldmad_pipe(**UpperCamelCase_ ) __snake_case : int = output.rgb, output.depth __snake_case : List[Any] = rgb_slice_a[0, -3:, -3:, -1] __snake_case : List[Any] = depth_slice_a[0, -3:, -1] assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4 assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4 def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator __snake_case : List[str] = self.get_dummy_components() __snake_case : Tuple = PNDMScheduler(skip_prk_steps=UpperCamelCase_ ) __snake_case : List[str] = StableDiffusionLDMaDPipeline(**UpperCamelCase_ ) __snake_case : int = ldmad_pipe.to(UpperCamelCase_ ) ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __snake_case : List[Any] = 
self.get_dummy_inputs(UpperCamelCase_ ) __snake_case : Optional[int] = '''french fries''' __snake_case : List[Any] = ldmad_pipe(**UpperCamelCase_ , negative_prompt=UpperCamelCase_ ) __snake_case : List[Any] = output.rgb, output.depth __snake_case : Tuple = rgb[0, -3:, -3:, -1] __snake_case : Any = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) __snake_case : Union[str, Any] = np.array( [0.3_7044, 0.7181_1503, 0.722_3251, 0.4860_3675, 0.563_8391, 0.636_4948, 0.4283_3704, 0.490_1315, 0.4792_6217] ) __snake_case : List[str] = np.array([107.8_4738, 84.6_2802, 89.96_2135] ) assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2 assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2 @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE (self , a_ , a_="cpu" , a_=torch.floataa , a_=0 ): '''simple docstring''' __snake_case : List[Any] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) __snake_case : int = np.random.RandomState(UpperCamelCase_ ).standard_normal((1, 4, 64, 64) ) __snake_case : Any = torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ , dtype=UpperCamelCase_ ) __snake_case : Optional[Any] = { '''prompt''': '''a photograph of an astronaut riding a horse''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' ) __snake_case : int = ldmad_pipe.to(UpperCamelCase_ ) ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __snake_case : List[Any] = self.get_inputs(UpperCamelCase_ ) __snake_case : Tuple = ldmad_pipe(**UpperCamelCase_ ) 
# NOTE(review): the first ten statements below are the tail of a test method whose
# `def` lies before this chunk boundary — they are reproduced unmodified.
# NOTE(review): obfuscation collapsed multi-target tuple unpacks into single
# assignments throughout this file (e.g. `rgb, depth = output.rgb, output.depth`
# became `__snake_case : ... = output.rgb, output.depth`), so names such as
# `rgb`, `depth`, `rgb_slice`, `expected_rgb_mean` are used but never bound.
__snake_case : List[str] = output.rgb, output.depth
__snake_case : List[str] = rgb[0, -3:, -3:, -1].flatten()
# NOTE(review): this slice reads `rgb` but is compared against
# `expected_slice_depth` below — it presumably should slice `depth`; confirm
# against the upstream diffusers test.
__snake_case : Optional[int] = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 5_12, 5_12, 3)
assert depth.shape == (1, 5_12, 5_12)
__snake_case : List[str] = np.array(
    [0.5380_5465, 0.5670_7305, 0.548_6515, 0.5701_2236, 0.581_4511, 0.5625_3487, 0.5484_3014, 0.5509_2263, 0.645_9706] )
__snake_case : Optional[int] = np.array(
    [0.926_3781, 0.667_8672, 0.548_6515, 0.9220_2145, 0.6783_1135, 0.5625_3487, 0.924_1694, 0.755_1478, 0.645_9706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3


@nightly
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
    """Nightly GPU tests for the Intel ldm3d Stable Diffusion pipelines.

    NOTE(review): all three methods below share the name ``SCREAMING_SNAKE_CASE``
    (only the last survives on the class), and locals were collapsed to
    ``__snake_case`` while later statements still read the original names
    (``generator``, ``latents``, ``ldmad_pipe``, ``output`` ...).
    """

    def SCREAMING_SNAKE_CASE (self ):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def SCREAMING_SNAKE_CASE (self , a_ , a_="cpu" , a_=torch.floataa , a_=0 ):
        """Build deterministic pipeline inputs (fixed seed, fixed latents)."""
        __snake_case : List[str] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
        __snake_case : List[str] = np.random.RandomState(UpperCamelCase_ ).standard_normal((1, 4, 64, 64) )
        __snake_case : Any = torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ , dtype=UpperCamelCase_ )
        __snake_case : List[str] = {
            '''prompt''': '''a photograph of an astronaut riding a horse''',
            '''latents''': latents,
            '''generator''': generator,
            '''num_inference_steps''': 50,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs

    def SCREAMING_SNAKE_CASE (self ):
        """Check mean/std of rgb and depth outputs of Intel/ldm3d."""
        __snake_case : str = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' ).to(UpperCamelCase_ )
        ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        __snake_case : Any = self.get_inputs(UpperCamelCase_ )
        __snake_case : Optional[int] = ldmad_pipe(**UpperCamelCase_ )
        __snake_case : Union[str, Any] = output.rgb, output.depth
        __snake_case : Union[str, Any] = 0.49_5586
        __snake_case : List[str] = 0.3379_5515
        __snake_case : Tuple = 112.4_8518
        __snake_case : int = 98.48_9746
        assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
        assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
        assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
        assert np.abs(expected_depth_std - depth.std() ) < 1E-3

    def SCREAMING_SNAKE_CASE (self ):
        """Check shapes and statistics of the 4-channel ldm3d-4c variant."""
        __snake_case : str = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d-4c''' ).to(UpperCamelCase_ )
        ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        __snake_case : Dict = self.get_inputs(UpperCamelCase_ )
        __snake_case : Any = ldmad_pipe(**UpperCamelCase_ )
        __snake_case : Dict = output.rgb, output.depth
        __snake_case : int = 0.419_4127
        __snake_case : Optional[int] = 0.3537_5586
        __snake_case : int = 0.563_8502
        __snake_case : Optional[int] = 0.3468_6103
        assert rgb.shape == (1, 5_12, 5_12, 3)
        assert depth.shape == (1, 5_12, 5_12, 1)
        assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
        assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
        assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
        assert np.abs(expected_depth_std - depth.std() ) < 1E-3
102
"""Fast (rust-backed) tokenizer for BlenderbotSmall.

BUG FIX: obfuscation had collapsed every module constant to ``__snake_case``
(leaving ``VOCAB_FILES_NAMES`` etc. undefined where the class reads them),
collapsed all four class attributes to ``_a``, gave ``__init__`` duplicate
parameter names (a SyntaxError), and left locals ``output``/``sep``/``cls``
undefined.  Reconstructed to the canonical Hugging Face implementation.
"""
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class lowercase(PreTrainedTokenizerFast):
    """BlenderbotSmall fast tokenizer, built on a byte-level BPE model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        """Wrap a ByteLevelBPETokenizer built from *vocab_file*/*merges_file*."""
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add BOS/EOS around one sequence, or join a pair with EOS separators."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return all-zero token-type ids (BlenderbotSmall has no segments)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
97
0
"""simple docstring""" from .imports import is_rich_available if is_rich_available(): from rich.traceback import install install(show_locals=False) else: raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
144
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
97
0
"""Image processor for Pix2Struct: renders optional header text, resizes to a
patch grid, and flattens images into (row_id, col_id, patch) features.

NOTE(review): this file is obfuscation-damaged throughout — all three module
helpers share the name ``SCREAMING_SNAKE_CASE_`` (so ``_check_torch_version``,
``torch_extract_patches``, ``render_text`` and ``render_header`` referenced
below are undefined), locals were collapsed to ``_a`` while later statements
read the original names, and several ``def`` signatures repeat a parameter
name, which is a SyntaxError.  Reproduced unmodified with review notes.
"""
import io
import math
from typing import Dict, Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    get_image_size,
    infer_channel_dimension_format,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends


if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont

if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    A__: List[str] = False

# NOTE(review): the logger, the font-repo id and the class below were all
# renamed to ``A__`` — each assignment shadows the previous one.
A__: str = logging.get_logger(__name__)

A__: Optional[Any] = '''ybelkada/fonts'''


def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]:
    # Originally `_check_torch_version`: Pix2Struct needs torch >= 1.11.
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            F"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            """Pix2StructImageProcessor. Please upgrade torch.""" )


def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : List[str] ) -> Optional[int]:
    # Originally `torch_extract_patches(image_tensor, patch_height, patch_width)`.
    # NOTE(review): duplicate parameter names above are a SyntaxError, and the
    # body reads `__a`, `image_tensor`, `patches` — names never bound here.
    requires_backends(__a ,["""torch"""] )
    _check_torch_version()
    _a : Tuple =image_tensor.unsqueeze(0 )
    _a : Optional[int] =torch.nn.functional.unfold(__a ,(patch_height, patch_width) ,stride=(patch_height, patch_width) )
    _a : Union[str, Any] =patches.reshape(image_tensor.size(0 ) ,image_tensor.size(1 ) ,__a ,__a ,-1 )
    _a : List[Any] =patches.permute(0 ,4 ,2 ,3 ,1 ).reshape(
        image_tensor.size(2 ) // patch_height ,image_tensor.size(3 ) // patch_width ,image_tensor.size(1 ) * patch_height * patch_width ,)
    return patches.unsqueeze(0 )


def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : Union[str, Any] = 36 ,_UpperCAmelCase : Tuple = "black" ,_UpperCAmelCase : Optional[int] = "white" ,_UpperCAmelCase : str = 5 ,_UpperCAmelCase : str = 5 ,_UpperCAmelCase : Any = 5 ,_UpperCAmelCase : int = 5 ,_UpperCAmelCase : Tuple = None ,_UpperCAmelCase : Dict = None ,) -> Image.Image:
    # Originally `render_text(text, text_size, text_color, background_color,
    # left/right/top/bottom padding, font_bytes, font_path)`.
    requires_backends(__a ,"""vision""" )
    # Add new lines so that each line is no more than 80 characters.
    _a : Union[str, Any] =textwrap.TextWrapper(width=80 )
    _a : int =wrapper.wrap(text=__a )
    _a : Union[str, Any] ='''\n'''.join(__a )
    if font_bytes is not None and font_path is None:
        _a : str =io.BytesIO(__a )
    elif font_path is not None:
        _a : int =font_path
    else:
        # Falls back to downloading Arial from the hub font repo.
        _a : int =hf_hub_download(__a ,"""Arial.TTF""" )
    _a : List[str] =ImageFont.truetype(__a ,encoding="""UTF-8""" ,size=__a )
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    _a : Optional[int] =ImageDraw.Draw(Image.new("""RGB""" ,(1, 1) ,__a ) )
    # NOTE(review): originally a 4-target unpack of the bbox
    # (`_, _, text_width, text_height = ...`), collapsed here.
    _a : List[str] =temp_draw.textbbox((0, 0) ,__a ,__a )
    # Create the actual image with a bit of padding around the text.
    _a : List[Any] =text_width + left_padding + right_padding
    _a : int =text_height + top_padding + bottom_padding
    _a : str =Image.new("""RGB""" ,(image_width, image_height) ,__a )
    _a : Tuple =ImageDraw.Draw(__a )
    draw.text(xy=(left_padding, top_padding) ,text=__a ,fill=__a ,font=__a )
    return image


def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Any ,_UpperCAmelCase : Any ,**_UpperCAmelCase : int ) -> Optional[Any]:
    # Originally `render_header(image, header, **kwargs)`: renders the header
    # text and pastes it above the (resized) image.
    requires_backends(__a ,"""vision""" )
    # Convert to PIL image if necessary
    _a : Optional[int] =to_pil_image(__a )
    _a : List[str] =render_text(__a ,**__a )
    _a : List[str] =max(header_image.width ,image.width )
    _a : List[str] =int(image.height * (new_width / image.width) )
    _a : Dict =int(header_image.height * (new_width / header_image.width) )
    _a : Dict =Image.new("""RGB""" ,(new_width, new_height + new_header_height) ,"""white""" )
    new_image.paste(header_image.resize((new_width, new_header_height) ) ,(0, 0) )
    new_image.paste(image.resize((new_width, new_height) ) ,(0, new_header_height) )
    # Convert back to the original framework if necessary
    _a : Optional[int] =to_numpy_array(__a )
    if infer_channel_dimension_format(__a ) == ChannelDimension.LAST:
        _a : int =to_channel_dimension_format(__a ,ChannelDimension.LAST )
    return new_image


class A__ ( A__ ):
    """Pix2Struct image processor (originally ``Pix2StructImageProcessor``,
    base originally ``BaseImageProcessor``)."""

    __UpperCamelCase : Tuple = ["flattened_patches"]

    def __init__( self :Dict , SCREAMING_SNAKE_CASE :Optional[Any] = True , SCREAMING_SNAKE_CASE :Dict = True , SCREAMING_SNAKE_CASE :Tuple = None , SCREAMING_SNAKE_CASE :List[Any] = 2_0_4_8 , SCREAMING_SNAKE_CASE :List[Any] = False , **SCREAMING_SNAKE_CASE :Union[str, Any] , ) -> List[str]:
        """Store do_convert_rgb / do_normalize / patch_size / max_patches / is_vqa."""
        super().__init__(**UpperCamelCase_ )
        _a : List[Any] =patch_size if patch_size is not None else {'''height''': 1_6, '''width''': 1_6}
        _a : Optional[int] =do_normalize
        _a : List[str] =do_convert_rgb
        _a : Union[str, Any] =max_patches
        _a : Optional[int] =is_vqa

    def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] , **SCREAMING_SNAKE_CASE :Any ) -> int:
        """Originally ``extract_flattened_patches(image, max_patches, patch_size)``:
        resize so at most *max_patches* fit, cut into patches, prepend 1-based
        row/col ids, and zero-pad to exactly *max_patches* rows."""
        requires_backends(self.extract_flattened_patches , """torch""" )
        _check_torch_version()
        # convert to torch
        _a : Union[str, Any] =to_channel_dimension_format(UpperCamelCase_ , ChannelDimension.FIRST )
        _a : Dict =torch.from_numpy(UpperCamelCase_ )
        _a : Tuple =patch_size['''height'''], patch_size['''width''']
        _a : Any =get_image_size(UpperCamelCase_ )
        # maximize scale s.t.
        _a : Tuple =math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
        _a : Tuple =max(min(math.floor(scale * image_height / patch_height ) , UpperCamelCase_ ) , 1 )
        _a : Optional[Any] =max(min(math.floor(scale * image_width / patch_width ) , UpperCamelCase_ ) , 1 )
        _a : str =max(num_feasible_rows * patch_height , 1 )
        _a : Optional[int] =max(num_feasible_cols * patch_width , 1 )
        _a : Optional[int] =torch.nn.functional.interpolate(
            image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="""bilinear""" , align_corners=UpperCamelCase_ , antialias=UpperCamelCase_ , ).squeeze(0 )
        # [1, rows, columns, patch_height * patch_width * image_channels]
        _a : Optional[int] =torch_extract_patches(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        _a : List[Any] =patches.shape
        _a : Dict =patches_shape[1]
        _a : int =patches_shape[2]
        _a : List[str] =patches_shape[3]
        # [rows * columns, patch_height * patch_width * image_channels]
        _a : int =patches.reshape([rows * columns, depth] )
        # [rows * columns, 1]
        _a : Union[str, Any] =torch.arange(UpperCamelCase_ ).reshape([rows, 1] ).repeat(1 , UpperCamelCase_ ).reshape([rows * columns, 1] )
        _a : str =torch.arange(UpperCamelCase_ ).reshape([1, columns] ).repeat(UpperCamelCase_ , 1 ).reshape([rows * columns, 1] )
        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1
        # Prepare additional patch features.
        # [rows * columns, 1]
        _a : List[Any] =row_ids.to(torch.floataa )
        _a : int =col_ids.to(torch.floataa )
        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        _a : Any =torch.cat([row_ids, col_ids, patches] , -1 )
        # [max_patches, 2 + patch_height * patch_width * image_channels]
        _a : Any =torch.nn.functional.pad(UpperCamelCase_ , [0, 0, 0, max_patches - (rows * columns)] ).float()
        _a : Union[str, Any] =to_numpy_array(UpperCamelCase_ )
        return result

    def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Dict = None , **SCREAMING_SNAKE_CASE :Tuple ) -> Any:
        """Originally ``normalize``: per-image standardisation with a std floor
        of 1/sqrt(pixel count), matching the original Pix2Struct code."""
        if image.dtype == np.uinta:
            _a : Dict =image.astype(np.floataa )
        # take mean across the whole `image`
        _a : Optional[int] =np.mean(UpperCamelCase_ )
        _a : str =np.std(UpperCamelCase_ )
        _a : int =max(UpperCamelCase_ , 1.0 / math.sqrt(np.prod(image.shape ) ) )
        return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , **UpperCamelCase_ )

    def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Optional[Any] = None , SCREAMING_SNAKE_CASE :List[Any] = None , SCREAMING_SNAKE_CASE :Dict = None , SCREAMING_SNAKE_CASE :List[str] = None , SCREAMING_SNAKE_CASE :Tuple = None , SCREAMING_SNAKE_CASE :List[str] = None , SCREAMING_SNAKE_CASE :Tuple = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE :str , ) -> str:
        """Originally ``preprocess``: optional RGB conversion, VQA header
        rendering, normalisation, patch extraction, and attention-mask
        creation, returned as a :class:`BatchFeature`."""
        _a : List[str] =do_normalize if do_normalize is not None else self.do_normalize
        _a : Tuple =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        _a : List[Any] =patch_size if patch_size is not None else self.patch_size
        _a : Any =max_patches if max_patches is not None else self.max_patches
        _a : Optional[int] =self.is_vqa
        if kwargs.get("""data_format""" , UpperCamelCase_ ) is not None:
            raise ValueError("""data_format is not an accepted input as the outputs are """ )
        _a : Dict =make_list_of_images(UpperCamelCase_ )
        if not valid_images(UpperCamelCase_ ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            _a : List[str] =[convert_to_rgb(UpperCamelCase_ ) for image in images]
        # All transformations expect numpy arrays.
        _a : Optional[int] =[to_numpy_array(UpperCamelCase_ ) for image in images]
        if is_vqa:
            if header_text is None:
                raise ValueError("""A header text must be provided for VQA models.""" )
            _a : Tuple =kwargs.pop("""font_bytes""" , UpperCamelCase_ )
            _a : Any =kwargs.pop("""font_path""" , UpperCamelCase_ )
            if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
                _a : Any =[header_text] * len(UpperCamelCase_ )
            _a : str =[
                render_header(UpperCamelCase_ , header_text[i] , font_bytes=UpperCamelCase_ , font_path=UpperCamelCase_ )
                for i, image in enumerate(UpperCamelCase_ )
            ]
        if do_normalize:
            _a : Optional[int] =[self.normalize(image=UpperCamelCase_ ) for image in images]
        # convert to torch tensor and permute
        _a : Optional[Any] =[
            self.extract_flattened_patches(image=UpperCamelCase_ , max_patches=UpperCamelCase_ , patch_size=UpperCamelCase_ )
            for image in images
        ]
        # create attention mask in numpy
        _a : Optional[int] =[(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
        _a : List[str] =BatchFeature(
            data={"""flattened_patches""": images, """attention_mask""": attention_masks} , tensor_type=UpperCamelCase_ )
        return encoded_outputs
276
'''simple docstring''' from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class lowercase ( A__ ): """simple docstring""" def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ): '''simple docstring''' super().__init__( features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , ) UpperCamelCase__ :Any = Generator( cache_dir=UpperCamelCase_ , features=UpperCamelCase_ , generator=UpperCamelCase_ , gen_kwargs=UpperCamelCase_ , **UpperCamelCase_ , ) def lowerCAmelCase__ ( self ): '''simple docstring''' if self.streaming: UpperCamelCase__ :Optional[Any] = self.builder.as_streaming_dataset(split='''train''' ) # Build regular (map-style) dataset else: UpperCamelCase__ :Optional[int] = None UpperCamelCase__ :int = None UpperCamelCase__ :Any = None UpperCamelCase__ :Any = None self.builder.download_and_prepare( download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , ) UpperCamelCase__ :List[Any] = self.builder.as_dataset( split='''train''' , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory ) return dataset
97
0
def lowerCamelCase__ ( A__ : Union[str, Any] = 50 ): '''simple docstring''' __lowerCamelCase = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(f"""{solution() = }""")
12
'''simple docstring''' __snake_case = 65521 def a ( __a ) -> int: '''simple docstring''' UpperCamelCase__ :Tuple = 1 UpperCamelCase__ :Any = 0 for plain_chr in plain_text: UpperCamelCase__ :List[str] = (a + ord(__a )) % MOD_ADLER UpperCamelCase__ :Tuple = (b + a) % MOD_ADLER return (b << 16) | a
97
0
# NOTE(review): this test module is obfuscation-damaged — nearly every local
# assignment target was collapsed to `a__` while later statements still read
# the original names (`size`, `w`, `h`, `image_processing`, `encoding`, ...),
# and `__init__` repeats the parameter name `lowercase_`, which is a
# SyntaxError.  Reproduced unmodified with documentation only.
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DetrImageProcessor


class __magic_name__ ( unittest.TestCase ):
    """Fixture helper (originally ``DetrImageProcessingTester``): holds the
    image-processor configuration and computes expected output sizes."""

    def __init__( self, lowercase_, lowercase_=7, lowercase_=3, lowercase_=30, lowercase_=400, lowercase_=True, lowercase_=None, lowercase_=True, lowercase_=1 / 255, lowercase_=True, lowercase_=[0.5, 0.5, 0.5], lowercase_=[0.5, 0.5, 0.5], lowercase_=True, ) -> Optional[Any]:
        """Originally took (parent, batch_size, num_channels, min_resolution,
        max_resolution, do_resize, size, do_rescale, rescale_factor,
        do_normalize, image_mean, image_std, do_pad)."""
        a__ =size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
        a__ =parent
        a__ =batch_size
        a__ =num_channels
        a__ =min_resolution
        a__ =max_resolution
        a__ =do_resize
        a__ =size
        a__ =do_rescale
        a__ =rescale_factor
        a__ =do_normalize
        a__ =image_mean
        a__ =image_std
        a__ =do_pad

    def _UpperCAmelCase ( self ) -> Dict:
        """Return the kwargs dict used to build a DetrImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def _UpperCAmelCase ( self, lowercase_, lowercase_=False ) -> Tuple:
        """Originally ``get_expected_values(image_inputs, batched)``: predict
        the post-resize (height, width), keeping the aspect ratio so the
        shorter side hits ``size['shortest_edge']``; for batched input, take
        the max over per-image values."""
        if not batched:
            a__ =image_inputs[0]
            if isinstance(UpperCamelCase_, Image.Image ):
                a__ =image.size
            else:
                a__ =image.shape[1], image.shape[2]
            if w < h:
                a__ =int(self.size['''shortest_edge'''] * h / w )
                a__ =self.size['''shortest_edge''']
            elif w > h:
                a__ =self.size['''shortest_edge''']
                a__ =int(self.size['''shortest_edge'''] * w / h )
            else:
                a__ =self.size['''shortest_edge''']
                a__ =self.size['''shortest_edge''']
        else:
            a__ =[]
            for image in image_inputs:
                a__ =self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            a__ =max(UpperCamelCase_, key=lambda lowercase_ : item[0] )[0]
            a__ =max(UpperCamelCase_, key=lambda lowercase_ : item[1] )[1]
        return expected_height, expected_width


@require_torch
@require_vision
class __magic_name__ ( A__ , unittest.TestCase ):
    """Tests for DetrImageProcessor (originally ``DetrImageProcessingTest``).

    NOTE(review): this class re-uses the name ``__magic_name__`` and so
    shadows the tester class above, which it also instantiates.
    """

    lowerCamelCase__ : int = DetrImageProcessor if is_vision_available() else None

    def _UpperCAmelCase ( self ) -> Optional[int]:
        """Create the shared fixture helper."""
        a__ =DetrImageProcessingTester(self )

    @property
    def _UpperCAmelCase ( self ) -> List[str]:
        """Processor kwargs from the fixture helper."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def _UpperCAmelCase ( self ) -> Union[str, Any]:
        """All expected configuration attributes exist on the processor."""
        a__ =self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCamelCase_, '''image_mean''' ) )
        self.assertTrue(hasattr(UpperCamelCase_, '''image_std''' ) )
        self.assertTrue(hasattr(UpperCamelCase_, '''do_normalize''' ) )
        self.assertTrue(hasattr(UpperCamelCase_, '''do_rescale''' ) )
        self.assertTrue(hasattr(UpperCamelCase_, '''rescale_factor''' ) )
        self.assertTrue(hasattr(UpperCamelCase_, '''do_resize''' ) )
        self.assertTrue(hasattr(UpperCamelCase_, '''size''' ) )
        self.assertTrue(hasattr(UpperCamelCase_, '''do_pad''' ) )

    def _UpperCAmelCase ( self ) -> Optional[Any]:
        """``from_dict`` honors defaults and explicit size/max_size overrides."""
        a__ =self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size, {'''shortest_edge''': 18, '''longest_edge''': 1333} )
        self.assertEqual(image_processor.do_pad, UpperCamelCase_ )
        a__ =self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=UpperCamelCase_ )
        self.assertEqual(image_processor.size, {'''shortest_edge''': 42, '''longest_edge''': 84} )
        self.assertEqual(image_processor.do_pad, UpperCamelCase_ )

    def _UpperCAmelCase ( self ) -> Union[str, Any]:
        """Intentionally empty placeholder from the common test mixin."""
        pass

    def _UpperCAmelCase ( self ) -> Union[str, Any]:
        """PIL inputs: output shapes match the predicted resize, single and batched."""
        a__ =self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        a__ =prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase_ )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase_, Image.Image )
        # Test not batched input
        a__ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
        a__ =self.image_processor_tester.get_expected_values(UpperCamelCase_ )
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        a__ =self.image_processor_tester.get_expected_values(UpperCamelCase_, batched=UpperCamelCase_ )
        a__ =image_processing(UpperCamelCase_, return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def _UpperCAmelCase ( self ) -> Dict:
        """NumPy inputs: same shape checks as the PIL variant."""
        a__ =self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        a__ =prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase_, numpify=UpperCamelCase_ )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase_, np.ndarray )
        # Test not batched input
        a__ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
        a__ =self.image_processor_tester.get_expected_values(UpperCamelCase_ )
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        a__ =image_processing(UpperCamelCase_, return_tensors='''pt''' ).pixel_values
        a__ =self.image_processor_tester.get_expected_values(UpperCamelCase_, batched=UpperCamelCase_ )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def _UpperCAmelCase ( self ) -> Optional[Any]:
        """Torch-tensor inputs: same shape checks as the PIL variant."""
        a__ =self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        a__ =prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase_, torchify=UpperCamelCase_ )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase_, torch.Tensor )
        # Test not batched input
        a__ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
        a__ =self.image_processor_tester.get_expected_values(UpperCamelCase_ )
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        a__ =image_processing(UpperCamelCase_, return_tensors='''pt''' ).pixel_values
        a__ =self.image_processor_tester.get_expected_values(UpperCamelCase_, batched=UpperCamelCase_ )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def _UpperCAmelCase ( self ) -> int:
        """End-to-end: encode a COCO detection sample with the pretrained
        facebook/detr-resnet-50 processor and verify every label field."""
        a__ =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''', '''r''' ) as f:
            a__ =json.loads(f.read() )
        a__ ={'''image_id''': 39769, '''annotations''': target}
        # encode them
        a__ =DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50''' )
        a__ =image_processing(images=UpperCamelCase_, annotations=UpperCamelCase_, return_tensors='''pt''' )
        # verify pixel values
        a__ =torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding['''pixel_values'''].shape, UpperCamelCase_ )
        a__ =torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3], UpperCamelCase_, atol=1E-4 ) )
        # verify area
        a__ =torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''], UpperCamelCase_ ) )
        # verify boxes
        a__ =torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape, UpperCamelCase_ )
        a__ =torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0], UpperCamelCase_, atol=1E-3 ) )
        # verify image_id
        a__ =torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''], UpperCamelCase_ ) )
        # verify is_crowd
        a__ =torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''], UpperCamelCase_ ) )
        # verify class_labels
        a__ =torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''], UpperCamelCase_ ) )
        # verify orig_size
        a__ =torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''], UpperCamelCase_ ) )
        # verify size
        a__ =torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''], UpperCamelCase_ ) )

    @slow
    def _UpperCAmelCase ( self ) -> Tuple:
        """End-to-end: encode a COCO panoptic sample with
        facebook/detr-resnet-50-panoptic and verify labels including masks."""
        a__ =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''', '''r''' ) as f:
            a__ =json.loads(f.read() )
        a__ ={'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
        a__ =pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
        # encode them
        a__ =DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50-panoptic''' )
        a__ =image_processing(images=UpperCamelCase_, annotations=UpperCamelCase_, masks_path=UpperCamelCase_, return_tensors='''pt''' )
        # verify pixel values
        a__ =torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding['''pixel_values'''].shape, UpperCamelCase_ )
        a__ =torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3], UpperCamelCase_, atol=1E-4 ) )
        # verify area
        a__ =torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''], UpperCamelCase_ ) )
        # verify boxes
        a__ =torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape, UpperCamelCase_ )
        a__ =torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0], UpperCamelCase_, atol=1E-3 ) )
        # verify image_id
        a__ =torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''], UpperCamelCase_ ) )
        # verify is_crowd
        a__ =torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''], UpperCamelCase_ ) )
        # verify class_labels
        a__ =torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''], UpperCamelCase_ ) )
        # verify masks
        a__ =822873
        self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item(), UpperCamelCase_ )
        # verify orig_size
        a__ =torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''], UpperCamelCase_ ) )
        # verify size
        a__ =torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''], UpperCamelCase_ ) )
188
"""CamemBERT model configuration.

BUG FIX: obfuscation gave ``__init__`` duplicate parameter names (a
SyntaxError), stripped the ``self.`` from every attribute assignment, named
both classes ``lowercase`` (so the config class was shadowed by the ONNX
config), and collapsed the ``model_type`` class attribute to ``_a``.
Reconstructed to the canonical Hugging Face implementation.
"""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    """Configuration class storing the hyperparameters of a CamemBERT model."""

    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """Store every hyperparameter; special token ids go to the base class."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    """ONNX export configuration for CamemBERT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec: multiple-choice adds a `choice` dimension."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
97
0
"""Download a WMT translation dataset and dump it as line-aligned
``{split}.source`` / ``{split}.target`` text files (seq2seq format)."""
from pathlib import Path

import fire
from tqdm import tqdm


def download_wmt_dataset(src_lang: str = "ro", tgt_lang: str = "en", dataset: str = "wmt16", save_dir=None) -> None:
    """Download *dataset* for the ``{src_lang}-{tgt_lang}`` pair and write it out.

    Args:
        src_lang: source language code (e.g. ``"ro"``).
        tgt_lang: target language code (e.g. ``"en"``).
        dataset: HF datasets identifier (e.g. ``"wmt16"``).
        save_dir: output directory; defaults to ``"{dataset}-{src}-{tgt}"``.

    Raises:
        ImportError: if the ``datasets`` package is not installed.
    """
    # Lazy import so the CLI gives a helpful message when datasets is missing.
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError('run pip install datasets')
    pair = f'{src_lang}-{tgt_lang}'
    print(f'Converting {dataset}-{pair}')
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f'{dataset}-{pair}'
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f'Splitting {split} with {ds[split].num_rows} records')

        # to save to val.source, val.target like summary datasets
        fn = 'val' if split == 'validation' else split

        src_path = save_dir.joinpath(f'{fn}.source')
        tgt_path = save_dir.joinpath(f'{fn}.target')
        # context managers guarantee the files are flushed and closed
        # (the original leaked both handles)
        with src_path.open('w+') as src_fp, tgt_path.open('w+') as tgt_fp:
            # reader is the bottleneck so writing one record at a time doesn't slow things down
            for x in tqdm(ds[split]):
                ex = x['translation']
                src_fp.write(ex[src_lang] + '\n')
                tgt_fp.write(ex[tgt_lang] + '\n')

    print(f'Saved {dataset} dataset to {save_dir}')


# Backward-compatible alias for the old (obfuscated) function name.
a_ = download_wmt_dataset


if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
282
'''Tests for the ConditionalDetr image processor.

NOTE(review): this chunk is a machine-obfuscated dump — both test classes
are literally named ``lowercase`` (the second shadows the first), every
method parameter reuses the name ``UpperCamelCase_`` while bodies read the
intended de-obfuscated names, annotated tuple unpacks such as
``UpperCamelCase__ , UpperCamelCase__ :List[str] = ...`` are SyntaxErrors as
written, and the mixin base is the undefined name ``A__`` (presumably
``ImageProcessingSavingTestMixin`` — TODO confirm).  Code tokens are
preserved byte-for-byte; only comments and docstrings were added.
'''
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ConditionalDetrImageProcessor


class lowercase ( unittest.TestCase ):
    """Fixture holding image-processor hyperparameters and shape helpers."""

    def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=3 , UpperCamelCase_=30 , UpperCamelCase_=400 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=True , UpperCamelCase_=1 / 255 , UpperCamelCase_=True , ):
        '''Store default resize/normalize/rescale/pad settings for the tests.'''
        UpperCamelCase__ :Dict = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
        UpperCamelCase__ :str = parent
        UpperCamelCase__ :List[Any] = batch_size
        UpperCamelCase__ :Dict = num_channels
        UpperCamelCase__ :str = min_resolution
        UpperCamelCase__ :Optional[Any] = max_resolution
        UpperCamelCase__ :int = do_resize
        UpperCamelCase__ :Optional[Any] = size
        UpperCamelCase__ :Tuple = do_normalize
        UpperCamelCase__ :List[Any] = image_mean
        UpperCamelCase__ :Dict = image_std
        UpperCamelCase__ :Union[str, Any] = do_rescale
        UpperCamelCase__ :Union[str, Any] = rescale_factor
        UpperCamelCase__ :Union[str, Any] = do_pad

    def lowerCAmelCase__ ( self ):
        '''Return the kwargs dict used to construct the image processor.'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=False ):
        '''Compute the (height, width) the processor should output for one
        image (``batched=False``) or the per-batch maximum (``batched=True``).'''
        if not batched:
            UpperCamelCase__ :List[str] = image_inputs[0]
            if isinstance(UpperCamelCase_ , Image.Image ):
                UpperCamelCase__ , UpperCamelCase__ :List[str] = image.size
            else:
                # arrays/tensors are channels-first here: (C, H, W)
                UpperCamelCase__ , UpperCamelCase__ :List[Any] = image.shape[1], image.shape[2]
            # scale the short side up to ``shortest_edge`` keeping aspect ratio
            if w < h:
                UpperCamelCase__ :int = int(self.size['''shortest_edge'''] * h / w )
                UpperCamelCase__ :Dict = self.size['''shortest_edge''']
            elif w > h:
                UpperCamelCase__ :int = self.size['''shortest_edge''']
                UpperCamelCase__ :Tuple = int(self.size['''shortest_edge'''] * w / h )
            else:
                UpperCamelCase__ :str = self.size['''shortest_edge''']
                UpperCamelCase__ :str = self.size['''shortest_edge''']
        else:
            UpperCamelCase__ :Any = []
            for image in image_inputs:
                UpperCamelCase__ , UpperCamelCase__ :Dict = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            # a padded batch takes the max height/width over the batch
            UpperCamelCase__ :List[Any] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[0] )[0]
            UpperCamelCase__ :Optional[int] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[1] )[1]
        return expected_height, expected_width


@require_torch
@require_vision
class lowercase ( A__ , unittest.TestCase ):
    """Image-processing test suite for ConditionalDetr."""

    _a = ConditionalDetrImageProcessor if is_vision_available() else None

    def lowerCAmelCase__ ( self ):
        '''Create the shared tester fixture.'''
        UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessingTester(self )

    @property
    def lowerCAmelCase__ ( self ):
        '''Processor constructor kwargs from the tester fixture.'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCAmelCase__ ( self ):
        '''The processor exposes the expected configuration attributes.'''
        UpperCamelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) )
        self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) )
        self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
        self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) )
        self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) )

    def lowerCAmelCase__ ( self ):
        '''``from_dict`` honours both defaults and keyword overrides.'''
        UpperCamelCase__ :Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
        self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
        UpperCamelCase__ :List[str] = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase_ )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
        self.assertEqual(image_processor.do_pad , UpperCamelCase_ )

    def lowerCAmelCase__ ( self ):
        '''Intentionally empty placeholder kept from the original suite.'''
        pass

    def lowerCAmelCase__ ( self ):
        '''Round-trip random PIL images and check output tensor shapes.'''
        UpperCamelCase__ :Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCamelCase__ :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase_ , Image.Image )
        # Test not batched input
        UpperCamelCase__ :Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
        self.assertEqual(
            encoded_images.shape ,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width) ,
        )
        # Test batched
        UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
        UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,
        )

    def lowerCAmelCase__ ( self ):
        '''Same as the PIL test but with numpy array inputs.'''
        UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase_ , np.ndarray )
        # Test not batched input
        UpperCamelCase__ :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        UpperCamelCase__ , UpperCamelCase__ :List[Any] = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
        self.assertEqual(
            encoded_images.shape ,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width) ,
        )
        # Test batched
        UpperCamelCase__ :Union[str, Any] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
        UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,
        )

    def lowerCAmelCase__ ( self ):
        '''Same as the PIL test but with torch tensor inputs.'''
        UpperCamelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
        # Test not batched input
        UpperCamelCase__ :str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        UpperCamelCase__ , UpperCamelCase__ :Dict = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
        self.assertEqual(
            encoded_images.shape ,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width) ,
        )
        # Test batched
        UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
        UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,
        )

    @slow
    def lowerCAmelCase__ ( self ):
        '''Integration test: encode COCO detection annotations end-to-end
        and verify every field against reference values.'''
        UpperCamelCase__ :Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
            UpperCamelCase__ :Optional[int] = json.loads(f.read() )
        UpperCamelCase__ :Any = {'''image_id''': 39769, '''annotations''': target}
        # encode them
        UpperCamelCase__ :str = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
        UpperCamelCase__ :List[Any] = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , return_tensors='''pt''' )
        # verify pixel values
        UpperCamelCase__ :List[str] = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
        UpperCamelCase__ :str = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
        # verify area
        UpperCamelCase__ :str = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
        # verify boxes
        UpperCamelCase__ :Optional[Any] = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
        UpperCamelCase__ :Optional[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
        # verify image_id
        UpperCamelCase__ :List[Any] = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
        # verify is_crowd
        UpperCamelCase__ :int = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
        # verify class_labels
        UpperCamelCase__ :List[str] = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
        # verify orig_size
        UpperCamelCase__ :Tuple = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
        # verify size
        UpperCamelCase__ :Union[str, Any] = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) )

    @slow
    def lowerCAmelCase__ ( self ):
        '''Integration test: encode COCO panoptic annotations end-to-end
        (includes segmentation masks) and verify every field.'''
        UpperCamelCase__ :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
            UpperCamelCase__ :Tuple = json.loads(f.read() )
        UpperCamelCase__ :List[str] = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
        UpperCamelCase__ :Any = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
        # encode them
        UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
        UpperCamelCase__ :Dict = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , masks_path=UpperCamelCase_ , return_tensors='''pt''' )
        # verify pixel values
        UpperCamelCase__ :str = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
        UpperCamelCase__ :Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
        # verify area
        UpperCamelCase__ :Tuple = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
        # verify boxes
        UpperCamelCase__ :Any = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
        UpperCamelCase__ :List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
        # verify image_id
        UpperCamelCase__ :List[str] = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
        # verify is_crowd
        UpperCamelCase__ :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
        # verify class_labels
        UpperCamelCase__ :str = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
        # verify masks
        UpperCamelCase__ :Optional[Any] = 822873
        self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCamelCase_ )
        # verify orig_size
        UpperCamelCase__ :List[str] = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
        # verify size
        UpperCamelCase__ :List[Any] = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) )
97
0
'''CLIP-guided Stable Diffusion community pipeline (obfuscated dump).

NOTE(review): both module-level helpers below share the name
``SCREAMING_SNAKE_CASE`` (the second shadows the first), several methods
share the name ``__lowercase``, and bodies read the intended
de-obfuscated names (image, latents, t, ...) that the signatures no longer
bind.  The base class ``A__`` is undefined here; presumably it stands for
``DiffusionPipeline`` — TODO confirm.  Code tokens are preserved
byte-for-byte; only comments and docstrings were added.
'''
import inspect
from typing import Optional, Union

import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    DPMSolverMultistepScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
    PIL_INTERPOLATION,
    randn_tensor,
)


def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
    '''Convert a PIL image (or list of PIL images) into a (B, C, H, W)
    float tensor scaled to [-1, 1]; tensor inputs pass straight through.'''
    if isinstance(__a , torch.Tensor ):
        return image
    elif isinstance(__a , PIL.Image.Image ):
        UpperCamelCase__ : Tuple = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        UpperCamelCase__ : str = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
        UpperCamelCase__ : Optional[int] = np.concatenate(__a , axis=0 )
        UpperCamelCase__ : List[str] = np.array(__a ).astype(np.floataa ) / 255.0
        UpperCamelCase__ : int = image.transpose(0 , 3 , 1 , 2 )
        UpperCamelCase__ : List[str] = 2.0 * image - 1.0
        UpperCamelCase__ : int = torch.from_numpy(__a )
    elif isinstance(image[0] , torch.Tensor ):
        UpperCamelCase__ : int = torch.cat(__a , dim=0 )
    return image


def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=0.9_9_9_5 ) -> Dict:
    '''Spherical linear interpolation (slerp) between two vectors; falls
    back to lerp when the vectors are nearly collinear.  Accepts numpy or
    torch inputs and returns the matching kind.'''
    if not isinstance(__a , np.ndarray ):
        UpperCamelCase__ : str = True
        UpperCamelCase__ : Optional[int] = va.device
        UpperCamelCase__ : List[str] = va.cpu().numpy()
        UpperCamelCase__ : int = va.cpu().numpy()
    UpperCamelCase__ : List[str] = np.sum(va * va / (np.linalg.norm(__a ) * np.linalg.norm(__a )) )
    if np.abs(__a ) > DOT_THRESHOLD:
        # nearly parallel: plain linear interpolation is numerically safer
        UpperCamelCase__ : List[str] = (1 - t) * va + t * va
    else:
        UpperCamelCase__ : Union[str, Any] = np.arccos(__a )
        UpperCamelCase__ : Optional[int] = np.sin(__a )
        UpperCamelCase__ : Dict = theta_a * t
        UpperCamelCase__ : Optional[Any] = np.sin(__a )
        UpperCamelCase__ : Union[str, Any] = np.sin(theta_a - theta_t ) / sin_theta_a
        UpperCamelCase__ : str = sin_theta_t / sin_theta_a
        UpperCamelCase__ : Optional[int] = sa * va + sa * va
    if inputs_are_torch:
        UpperCamelCase__ : List[str] = torch.from_numpy(__a ).to(__a )
    return va


def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
    '''Squared spherical distance between two (normalized) embeddings.'''
    UpperCamelCase__ : str = F.normalize(__a , dim=-1 )
    UpperCamelCase__ : Any = F.normalize(__a , dim=-1 )
    return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )


def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Any:
    '''Set ``requires_grad`` on every parameter of ``model``.'''
    for param in model.parameters():
        UpperCamelCase__ : Any = value


class __a ( A__ ):
    """CLIP-guided stable diffusion style-transfer pipeline (content image +
    style image, with optional CoCa captioning when prompts are omitted)."""

    def __init__( self : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[str]=None , SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : List[Any]=None , ):
        '''Register components, cache the CLIP input size/normalization and
        freeze the text encoder and CLIP model.'''
        super().__init__()
        self.register_modules(
            vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , clip_model=UpperCamelCase_ , tokenizer=UpperCamelCase_ , unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , feature_extractor=UpperCamelCase_ , coca_model=UpperCamelCase_ , coca_tokenizer=UpperCamelCase_ , coca_transform=UpperCamelCase_ , )
        UpperCamelCase__ : List[Any] = (
            feature_extractor.size
            if isinstance(feature_extractor.size , UpperCamelCase_ )
            else feature_extractor.size['''shortest_edge''']
        )
        UpperCamelCase__ : List[str] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
        set_requires_grad(self.text_encoder , UpperCamelCase_ )
        set_requires_grad(self.clip_model , UpperCamelCase_ )

    def __lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE : Dict = "auto" ):
        '''Enable sliced attention computation to trade speed for memory.'''
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            UpperCamelCase__ : Any = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(UpperCamelCase_ )

    def __lowercase ( self : int ):
        '''Disable attention slicing (original passed ``None`` here).'''
        self.enable_attention_slicing(UpperCamelCase_ )

    def __lowercase ( self : List[str] ):
        '''Freeze the VAE (no gradients).'''
        set_requires_grad(self.vae , UpperCamelCase_ )

    def __lowercase ( self : List[Any] ):
        '''Unfreeze the VAE.'''
        set_requires_grad(self.vae , UpperCamelCase_ )

    def __lowercase ( self : List[str] ):
        '''Freeze the UNet (no gradients).'''
        set_requires_grad(self.unet , UpperCamelCase_ )

    def __lowercase ( self : Optional[int] ):
        '''Unfreeze the UNet.'''
        set_requires_grad(self.unet , UpperCamelCase_ )

    def __lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any ):
        '''Trim the scheduler timesteps according to img2img ``strength``.'''
        UpperCamelCase__ : int = min(int(num_inference_steps * strength ) , UpperCamelCase_ )
        UpperCamelCase__ : Dict = max(num_inference_steps - init_timestep , 0 )
        UpperCamelCase__ : int = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def __lowercase ( self : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any]=None ):
        '''Encode an init image into scaled, noised VAE latents.'''
        if not isinstance(UpperCamelCase_ , torch.Tensor ):
            raise ValueError(F'`image` has to be of type `torch.Tensor` but is {type(UpperCamelCase_ )}' )
        UpperCamelCase__ : List[str] = image.to(device=UpperCamelCase_ , dtype=UpperCamelCase_ )
        if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
            UpperCamelCase__ : int = [
                self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCamelCase_ )
            ]
            UpperCamelCase__ : str = torch.cat(UpperCamelCase_ , dim=0 )
        else:
            UpperCamelCase__ : Tuple = self.vae.encode(UpperCamelCase_ ).latent_dist.sample(UpperCamelCase_ )
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        UpperCamelCase__ : str = 0.1_8_2_1_5 * init_latents
        UpperCamelCase__ : Optional[Any] = init_latents.repeat_interleave(UpperCamelCase_ , dim=0 )
        UpperCamelCase__ : Any = randn_tensor(init_latents.shape , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
        # get latents
        UpperCamelCase__ : Tuple = self.scheduler.add_noise(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        UpperCamelCase__ : List[Any] = init_latents
        return latents

    def __lowercase ( self : str , SCREAMING_SNAKE_CASE : Optional[Any] ):
        '''Generate a caption for an image using the CoCa model.'''
        UpperCamelCase__ : Any = self.coca_transform(UpperCamelCase_ ).unsqueeze(0 )
        with torch.no_grad(), torch.cuda.amp.autocast():
            UpperCamelCase__ : List[str] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
        UpperCamelCase__ : int = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
        return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," )

    def __lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict ):
        '''Compute L2-normalized CLIP image embeddings, repeated per prompt.'''
        UpperCamelCase__ : Union[str, Any] = self.feature_extractor.preprocess(UpperCamelCase_ )
        UpperCamelCase__ : Any = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
        UpperCamelCase__ : str = self.clip_model.get_image_features(UpperCamelCase_ )
        UpperCamelCase__ : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCamelCase_ )
        UpperCamelCase__ : Union[str, Any] = image_embeddings_clip.repeat_interleave(UpperCamelCase_ , dim=0 )
        return image_embeddings_clip

    @torch.enable_grad()
    def __lowercase ( self : Any , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Tuple , ):
        '''CLIP-guidance step: decode the predicted x0, score it against the
        CLIP image embedding, and nudge latents/noise along the gradient.'''
        UpperCamelCase__ : List[str] = latents.detach().requires_grad_()
        UpperCamelCase__ : List[Any] = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
        # predict the noise residual
        UpperCamelCase__ : List[Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ ).sample
        if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
            UpperCamelCase__ : Union[str, Any] = self.scheduler.alphas_cumprod[timestep]
            UpperCamelCase__ : Optional[int] = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            UpperCamelCase__ : List[Any] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            UpperCamelCase__ : Any = torch.sqrt(UpperCamelCase_ )
            UpperCamelCase__ : Dict = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler , UpperCamelCase_ ):
            UpperCamelCase__ : Any = self.scheduler.sigmas[index]
            UpperCamelCase__ : Union[str, Any] = latents - sigma * noise_pred
        else:
            raise ValueError(F'scheduler type {type(self.scheduler )} not supported' )
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        UpperCamelCase__ : Dict = 1 / 0.1_8_2_1_5 * sample
        UpperCamelCase__ : List[Any] = self.vae.decode(UpperCamelCase_ ).sample
        UpperCamelCase__ : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
        UpperCamelCase__ : List[Any] = transforms.Resize(self.feature_extractor_size )(UpperCamelCase_ )
        UpperCamelCase__ : Optional[int] = self.normalize(UpperCamelCase_ ).to(latents.dtype )
        UpperCamelCase__ : Union[str, Any] = self.clip_model.get_image_features(UpperCamelCase_ )
        UpperCamelCase__ : Any = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCamelCase_ )
        UpperCamelCase__ : Any = spherical_dist_loss(UpperCamelCase_ , UpperCamelCase_ ).mean() * clip_guidance_scale
        UpperCamelCase__ : str = -torch.autograd.grad(UpperCamelCase_ , UpperCamelCase_ )[0]
        if isinstance(self.scheduler , UpperCamelCase_ ):
            UpperCamelCase__ : List[Any] = latents.detach() + grads * (sigma**2)
            UpperCamelCase__ : Optional[int] = noise_pred_original
        else:
            UpperCamelCase__ : Tuple = noise_pred_original - torch.sqrt(UpperCamelCase_ ) * grads
        return noise_pred, latents

    @torch.no_grad()
    def __call__( self : str , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Dict = None , SCREAMING_SNAKE_CASE : Dict = None , SCREAMING_SNAKE_CASE : Union[str, Any] = 5_12 , SCREAMING_SNAKE_CASE : List[str] = 5_12 , SCREAMING_SNAKE_CASE : Optional[int] = 0.6 , SCREAMING_SNAKE_CASE : Any = 50 , SCREAMING_SNAKE_CASE : Dict = 7.5 , SCREAMING_SNAKE_CASE : Union[str, Any] = 1 , SCREAMING_SNAKE_CASE : List[Any] = 0.0 , SCREAMING_SNAKE_CASE : str = 1_00 , SCREAMING_SNAKE_CASE : Dict = None , SCREAMING_SNAKE_CASE : int = "pil" , SCREAMING_SNAKE_CASE : Union[str, Any] = True , SCREAMING_SNAKE_CASE : Any = 0.8 , SCREAMING_SNAKE_CASE : Dict = 0.1 , SCREAMING_SNAKE_CASE : str = 0.1 , ):
        '''Run CLIP-guided style transfer: blend content/style prompts and
        latents via slerp, then denoise with classifier-free + CLIP guidance.'''
        if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size:
            raise ValueError(F'You have passed {batch_size} batch_size, but only {len(UpperCamelCase_ )} generators.' )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
        if isinstance(UpperCamelCase_ , torch.Generator ) and batch_size > 1:
            UpperCamelCase__ : List[Any] = [generator] + [None] * (batch_size - 1)
        UpperCamelCase__ : Union[str, Any] = [
            ('''model''', self.coca_model is None),
            ('''tokenizer''', self.coca_tokenizer is None),
            ('''transform''', self.coca_transform is None),
        ]
        UpperCamelCase__ : Union[str, Any] = [x[0] for x in coca_is_none if x[1]]
        UpperCamelCase__ : Union[str, Any] = ''', '''.join(UpperCamelCase_ )
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(UpperCamelCase_ ):
                raise ValueError(
                    F'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
                    F'Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
            UpperCamelCase__ : List[str] = self.get_image_description(UpperCamelCase_ )
        if style_prompt is None:
            if len(UpperCamelCase_ ):
                raise ValueError(
                    F'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
                    F' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
            UpperCamelCase__ : List[Any] = self.get_image_description(UpperCamelCase_ )
        # get prompt text embeddings for content and style
        UpperCamelCase__ : Union[str, Any] = self.tokenizer(
            UpperCamelCase_ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=UpperCamelCase_ , return_tensors="pt" , )
        UpperCamelCase__ : Any = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
        UpperCamelCase__ : Any = self.tokenizer(
            UpperCamelCase_ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=UpperCamelCase_ , return_tensors="pt" , )
        UpperCamelCase__ : Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
        UpperCamelCase__ : str = slerp(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        # duplicate text embeddings for each generation per prompt
        UpperCamelCase__ : List[str] = text_embeddings.repeat_interleave(UpperCamelCase_ , dim=0 )
        # set timesteps
        UpperCamelCase__ : Dict = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
        UpperCamelCase__ : Any = {}
        if accepts_offset:
            UpperCamelCase__ : Tuple = 1
        self.scheduler.set_timesteps(UpperCamelCase_ , **UpperCamelCase_ )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device )
        UpperCamelCase__ : int = self.get_timesteps(UpperCamelCase_ , UpperCamelCase_ , self.device )
        UpperCamelCase__ : Optional[Any] = timesteps[:1].repeat(UpperCamelCase_ )
        # Preprocess image
        UpperCamelCase__ : Any = preprocess(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        UpperCamelCase__ : Dict = self.prepare_latents(
            UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , text_embeddings.dtype , self.device , UpperCamelCase_ )
        UpperCamelCase__ : Optional[int] = preprocess(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        UpperCamelCase__ : Optional[Any] = self.prepare_latents(
            UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , text_embeddings.dtype , self.device , UpperCamelCase_ )
        UpperCamelCase__ : int = slerp(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        if clip_guidance_scale > 0:
            UpperCamelCase__ : Optional[Any] = self.get_clip_image_embeddings(UpperCamelCase_ , UpperCamelCase_ )
            UpperCamelCase__ : List[str] = self.get_clip_image_embeddings(UpperCamelCase_ , UpperCamelCase_ )
            UpperCamelCase__ : Dict = slerp(
                UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        UpperCamelCase__ : Any = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            UpperCamelCase__ : str = content_text_input.input_ids.shape[-1]
            UpperCamelCase__ : List[str] = self.tokenizer([""] , padding="max_length" , max_length=UpperCamelCase_ , return_tensors="pt" )
            UpperCamelCase__ : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt
            UpperCamelCase__ : Optional[Any] = uncond_embeddings.repeat_interleave(UpperCamelCase_ , dim=0 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            UpperCamelCase__ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        UpperCamelCase__ : Optional[int] = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        UpperCamelCase__ : Dict = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                UpperCamelCase__ : Any = torch.randn(UpperCamelCase_ , generator=UpperCamelCase_ , device="cpu" , dtype=UpperCamelCase_ ).to(
                    self.device )
            else:
                UpperCamelCase__ : int = torch.randn(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=UpperCamelCase_ )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
            UpperCamelCase__ : Union[str, Any] = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        UpperCamelCase__ : Union[str, Any] = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        UpperCamelCase__ : Any = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        UpperCamelCase__ : Any = {}
        if accepts_eta:
            UpperCamelCase__ : Union[str, Any] = eta
        # check if the scheduler accepts generator
        UpperCamelCase__ : int = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        if accepts_generator:
            UpperCamelCase__ : List[Any] = generator
        with self.progress_bar(total=UpperCamelCase_ ):
            for i, t in enumerate(UpperCamelCase_ ):
                # expand the latents if we are doing classifier free guidance
                UpperCamelCase__ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
                UpperCamelCase__ : str = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
                # predict the noise residual
                UpperCamelCase__ : str = self.unet(UpperCamelCase_ , UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ ).sample
                # perform classifier free guidance
                if do_classifier_free_guidance:
                    UpperCamelCase__ : List[str] = noise_pred.chunk(2 )
                    UpperCamelCase__ : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                # perform clip guidance
                if clip_guidance_scale > 0:
                    UpperCamelCase__ : Dict = (
                        text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
                    )
                    UpperCamelCase__ : Optional[int] = self.cond_fn(
                        UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , )
                # compute the previous noisy sample x_t -> x_t-1
                UpperCamelCase__ : Dict = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        UpperCamelCase__ : Any = 1 / 0.1_8_2_1_5 * latents
        UpperCamelCase__ : Union[str, Any] = self.vae.decode(UpperCamelCase_ ).sample
        UpperCamelCase__ : Any = (image / 2 + 0.5).clamp(0 , 1 )
        UpperCamelCase__ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            UpperCamelCase__ : Tuple = self.numpy_to_pil(UpperCamelCase_ )
        if not return_dict:
            return (image, None)
        return StableDiffusionPipelineOutput(images=UpperCamelCase_ , nsfw_content_detected=UpperCamelCase_ )
189
"""Count the number of ways to assign N tasks to M persons with a bitmask DP."""
from collections import defaultdict


class AssignmentUsingBitmask:
    """Counts assignments where every person receives exactly one distinct task.

    ``dp`` has dimension (2^M) x (N + 1); ``dp[mask][task_no]`` caches the number
    of complete assignments reachable when the persons whose bits are set in
    ``mask`` are already busy and tasks ``task_no..N`` remain to be handed out.
    """

    def __init__(self, task_performed, total):
        # total no. of tasks (N)
        self.total_tasks = total
        # DP table of dimension (2^M) x (N + 1); -1 marks "not computed yet"
        self.dp = [
            [-1 for _ in range(total + 1)] for _ in range(2 ** len(task_performed))
        ]
        # task[j] -> list of persons able to perform task j
        self.task = defaultdict(list)
        # all M low bits set: every person has been given a task
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        """Return the number of complete assignments reachable from this state."""
        # everyone already has a task: one valid assignment found
        if mask == self.final_mask:
            return 1
        # no tasks left but some persons are still idle
        if task_no > self.total_tasks:
            return 0
        # return the memoised value if this state was already solved
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # ways when task_no is assigned to nobody
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # try giving task_no to every person who can do it and is still free
        if task_no in self.task:
            for p in self.task[task_no]:
                if mask & (1 << p):
                    continue
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # memoise before returning
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        """Build the task -> persons index, then run the DP from the empty state."""
        for person, tasks in enumerate(task_performed):
            for task in tasks:
                self.task[task].append(person)
        # final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
97
0
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
139
'''simple docstring''' import csv import tweepy # Twitter API credentials __snake_case = '''''' __snake_case = '''''' __snake_case = '''''' __snake_case = '''''' def a ( __a ) -> None: '''simple docstring''' UpperCamelCase__ :List[Any] = tweepy.OAuthHandler(__a , __a ) auth.set_access_token(__a , __a ) UpperCamelCase__ :List[str] = tweepy.API(__a ) # initialize a list to hold all the tweepy Tweets UpperCamelCase__ :Dict = [] # make initial request for most recent tweets (200 is the maximum allowed count) UpperCamelCase__ :Tuple = api.user_timeline(screen_name=__a , count=200 ) # save most recent tweets alltweets.extend(__a ) # save the id of the oldest tweet less one UpperCamelCase__ :Union[str, Any] = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(__a ) > 0: print(f'''getting tweets before {oldest}''' ) # all subsequent requests use the max_id param to prevent duplicates UpperCamelCase__ :Union[str, Any] = api.user_timeline( screen_name=__a , count=200 , max_id=__a ) # save most recent tweets alltweets.extend(__a ) # update the id of the oldest tweet less one UpperCamelCase__ :Tuple = alltweets[-1].id - 1 print(f'''...{len(__a )} tweets downloaded so far''' ) # transform the tweepy tweets into a 2D array that will populate the csv UpperCamelCase__ :int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(f'''new_{screen_name}_tweets.csv''' , '''w''' ) as f: UpperCamelCase__ :Tuple = csv.writer(__a ) writer.writerow(['''id''', '''created_at''', '''text'''] ) writer.writerows(__a ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets('''FirePing32''')
97
0
import os def UpperCamelCase( __UpperCamelCase : Optional[int] = "input.txt" ): with open(os.path.join(os.path.dirname(__a ) ,__a ) ) as input_file: lowerCAmelCase_ : str = [ [int(__a ) for element in line.split(''',''' )] for line in input_file.readlines() ] lowerCAmelCase_ : Any = len(__a ) lowerCAmelCase_ : Optional[Any] = len(matrix[0] ) lowerCAmelCase_ : Any = [[-1 for _ in range(__a )] for _ in range(__a )] for i in range(__a ): lowerCAmelCase_ : List[Any] = matrix[i][0] for j in range(1 ,__a ): for i in range(__a ): lowerCAmelCase_ : List[Any] = minimal_path_sums[i][j - 1] + matrix[i][j] for i in range(1 ,__a ): lowerCAmelCase_ : List[str] = min( minimal_path_sums[i][j] ,minimal_path_sums[i - 1][j] + matrix[i][j] ) for i in range(rows - 2 ,-1 ,-1 ): lowerCAmelCase_ : List[Any] = min( minimal_path_sums[i][j] ,minimal_path_sums[i + 1][j] + matrix[i][j] ) return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums ) if __name__ == "__main__": print(F'''{solution() = }''')
103
"""Convert Microsoft Table Transformer checkpoints to the HF format.

Checkpoints come from https://github.com/microsoft/table-transformer.
"""
import argparse
from collections import OrderedDict
from pathlib import Path

import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F

from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
        ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
    ]
)


def rename_key(state_dict, old, new):
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place."""
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    """Return a copy of *state_dict* with the backbone prefix renamed to HF's."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


def read_in_q_k_v(state_dict):
    """Split fused in_proj weights into separate q/k/v projections, in place."""
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]

    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def resize(image, checkpoint_url):
    """Resize *image* so its longest side matches the checkpoint's expected size."""
    width, height = image.size
    current_max_size = max(width, height)
    # detection checkpoints were trained at max size 800, structure ones at 1000
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


def normalize(image):
    """Convert a PIL image to a tensor normalized with ImageNet statistics."""
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image


@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Download an original checkpoint, convert it to HF format, verify the
    outputs against reference values and optionally save / push the result."""
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion against a reference image
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
97
0
"""GLUE benchmark metric (accuracy / F1 / Pearson / Spearman / Matthews)."""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets

_CITATION = """\
@inproceedings{wang2019glue,
  title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
  author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
  note={In the Proceedings of ICLR.},
  year={2019}
}
"""

_DESCRIPTION = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""

_KWARGS_DESCRIPTION = """
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
    "accuracy": Accuracy
    "f1": F1 score
    "pearson": Pearson Correlation
    "spearmanr": Spearman Correlation
    "matthews_correlation": Matthew Correlation
Examples:

    >>> glue_metric = datasets.load_metric('glue', 'sst2')  # 'sst2' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'mrpc')  # 'mrpc' or 'qqp'
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'stsb')
    >>> references = [0., 1., 2., 3., 4., 5.]
    >>> predictions = [0., 1., 2., 3., 4., 5.]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
    {'pearson': 1.0, 'spearmanr': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'cola')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""


def simple_accuracy(preds, labels):
    """Fraction of exact matches between *preds* and *labels* (numpy arrays)."""
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    """Accuracy and binary F1 for classification subsets (mrpc, qqp)."""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    """Pearson and Spearman correlations for the regression subset (stsb)."""
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    """GLUE metric; the config name selects the subset and hence the scores."""

    def _info(self):
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # stsb is a regression task, all others are classification
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
250
'''simple docstring''' from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def a ( __a ) -> bool: '''simple docstring''' UpperCamelCase__ :int = int(number**0.5 ) return number == sq * sq def a ( __a , __a , __a , __a , __a , __a ) -> tuple[int, int]: '''simple docstring''' UpperCamelCase__ :int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den UpperCamelCase__ :int = x_den * y_den * z_den UpperCamelCase__ :int = gcd(__a , __a ) top //= hcf bottom //= hcf return top, bottom def a ( __a = 35 ) -> int: '''simple docstring''' UpperCamelCase__ :set = set() UpperCamelCase__ :int UpperCamelCase__ :Fraction = Fraction(0 ) UpperCamelCase__ :tuple[int, int] for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 UpperCamelCase__ :int = x_num * y_den + x_den * y_num UpperCamelCase__ :Any = x_den * y_den UpperCamelCase__ :Tuple = gcd(__a , __a ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: UpperCamelCase__ :Tuple = add_three( __a , __a , __a , __a , __a , __a ) unique_s.add(__a ) # n=2 UpperCamelCase__ :List[str] = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) UpperCamelCase__ :Dict = x_den * x_den * y_den * y_den if is_sq(__a ) and is_sq(__a ): UpperCamelCase__ :Any = int(sqrt(__a ) ) UpperCamelCase__ :Optional[int] = int(sqrt(__a ) ) UpperCamelCase__ :int = gcd(__a , __a ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: UpperCamelCase__ :Tuple = add_three( __a , __a , __a , __a , __a , __a ) unique_s.add(__a ) # n=-1 UpperCamelCase__ :Tuple = x_num * y_num UpperCamelCase__ :Union[str, Any] = x_den * y_num + x_num * y_den UpperCamelCase__ :List[str] = gcd(__a , __a ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: UpperCamelCase__ :Union[str, Any] = add_three( __a , __a , __a , __a , __a , __a ) unique_s.add(__a ) # n=2 UpperCamelCase__ 
:Optional[Any] = x_num * x_num * y_num * y_num UpperCamelCase__ :Tuple = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(__a ) and is_sq(__a ): UpperCamelCase__ :str = int(sqrt(__a ) ) UpperCamelCase__ :Any = int(sqrt(__a ) ) UpperCamelCase__ :Dict = gcd(__a , __a ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: UpperCamelCase__ :int = add_three( __a , __a , __a , __a , __a , __a ) unique_s.add(__a ) for num, den in unique_s: total += Fraction(__a , __a ) return total.denominator + total.numerator if __name__ == "__main__": print(F"""{solution() = }""")
97
0
"""Functions and classes related to optimization (weight updates) for TF models."""

import re
from typing import Callable, List, Optional, Union

import tensorflow as tf

try:
    # Keras >= 2.11 moved the original Adam to the legacy namespace
    from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
    from tensorflow.keras.optimizers import Adam


class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a linear-warmup phase in front of a given learning-rate decay
    schedule: for steps < ``warmup_steps`` the LR grows polynomially from 0 to
    ``initial_learning_rate``; afterwards ``decay_schedule_fn`` takes over."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: Optional[str] = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        """Serialization support for Keras schedule saving/loading."""
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }


def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    """Create an optimizer with a warmup-then-polynomial-decay LR schedule.

    Returns a ``(optimizer, lr_schedule)`` tuple; the optimizer is
    ``AdamWeightDecay`` when ``weight_decay_rate > 0``, plain Adam otherwise.
    """
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule


class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay applied before each parameter update.

    The decay is multiplied by the current learning rate (so it follows the LR
    schedule) and can be restricted by include/exclude regex lists.
    """

    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Create an optimizer from its config, re-registering WarmUp."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        # decoupled weight decay: shrink the variable before the Adam update
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieve the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True


class GradientAccumulator:
    """Accumulates gradients over several steps so that a larger effective
    batch size can be simulated; call with the gradients of each step and use
    ``gradients`` / ``reset`` around the actual optimizer update."""

    def __init__(self):
        """Initializes the accumulator."""
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps (lazily creates the counter variable)."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates *gradients* on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
255
"""Project Euler problem 40: Champernowne's constant.

An irrational decimal fraction is created by concatenating the positive
integers: 0.123456789101112...  Return d1 * d10 * d100 * ... * d1000000,
where dn is the nth digit of the fractional part.
"""


def solution() -> int:
    """Return the product of digits d1, d10, ..., d1000000 of Champernowne's constant.

    Builds the digit string only up to the millionth digit (tracking the total
    digit count, not the number of appended integers, so we stop as soon as
    enough digits exist instead of concatenating a million whole numbers).
    """
    parts: list = []
    digit_count = 0
    i = 1
    # Appending a whole number may overshoot 10**6 slightly, which is fine:
    # we only ever index up to the millionth digit.
    while digit_count < 10**6:
        s = str(i)
        parts.append(s)
        digit_count += len(s)
        i += 1
    constant = "".join(parts)
    result = 1
    for power in range(7):  # digit positions 1, 10, 100, ..., 1_000_000 (1-indexed)
        result *= int(constant[10**power - 1])
    return result


if __name__ == "__main__":
    print(solution())
97
0
"""simple docstring""" from PIL import Image def lowercase ( _snake_case : Optional[int] , _snake_case : Tuple ) ->Image: """simple docstring""" def brightness(_snake_case : Optional[Any] ) -> float: return 128 + level + (c - 128) if not -255.0 <= level <= 255.0: raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' ) return img.point(__a ) if __name__ == "__main__": # Load image with Image.open("""image_data/lena.jpg""") as img: # Change brightness to 100 SCREAMING_SNAKE_CASE : str = change_brightness(img, 100) brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
102
"""Adjust the brightness of a PIL image (duplicate variant of the brightness module)."""
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Add ``level`` to every channel of ``img`` and return the resulting image.

    Raises:
        ValueError: if ``level`` is not within [-255.0, 255.0].
    """

    def brightness(c: int) -> float:
        # Equivalent to c + level, expressed relative to the 128 midpoint.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
97
0
"""simple docstring""" def _snake_case ( lowerCamelCase__ : str ) -> None: lowerCamelCase_ : Optional[int] =generate_pascal_triangle(__a ) for row_idx in range(__a ): # Print left spaces for _ in range(num_rows - row_idx - 1 ): print(end=" " ) # Print row values for col_idx in range(row_idx + 1 ): if col_idx != row_idx: print(triangle[row_idx][col_idx] , end=" " ) else: print(triangle[row_idx][col_idx] , end="" ) print() def _snake_case ( lowerCamelCase__ : int ) -> list[list[int]]: if not isinstance(__a , __a ): raise TypeError("The input value of \'num_rows\' should be \'int\'" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( "The input value of \'num_rows\' should be greater than or equal to 0" ) lowerCamelCase_ : list[list[int]] =[] for current_row_idx in range(__a ): lowerCamelCase_ : List[Any] =populate_current_row(__a , __a ) triangle.append(__a ) return triangle def _snake_case ( lowerCamelCase__ : int , lowerCamelCase__ : Dict ) -> list[int]: lowerCamelCase_ : int =[-1] * (current_row_idx + 1) # first and last elements of current row are equal to 1 lowerCamelCase_ : Dict =1, 1 for current_col_idx in range(1 , __a ): calculate_current_element( __a , __a , __a , __a ) return current_row def _snake_case ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , ) -> None: lowerCamelCase_ : Any =triangle[current_row_idx - 1][current_col_idx - 1] lowerCamelCase_ : List[Any] =triangle[current_row_idx - 1][current_col_idx] lowerCamelCase_ : List[Any] =above_to_left_elt + above_to_right_elt def _snake_case ( lowerCamelCase__ : List[str] ) -> list[list[int]]: if not isinstance(__a , __a ): raise TypeError("The input value of \'num_rows\' should be \'int\'" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( "The input value of \'num_rows\' should be greater than or equal to 0" ) lowerCamelCase_ : list[list[int]] =[[1]] for row_index in range(1 , __a ): lowerCamelCase_ : 
Any =[0] + result[-1] + [0] lowerCamelCase_ : Optional[int] =row_index + 1 # Calculate the number of distinct elements in a row lowerCamelCase_ : Any =sum(divmod(__a , 2 ) ) lowerCamelCase_ : Optional[int] =[ temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 ) ] lowerCamelCase_ : Optional[int] =row_first_half[: (row_index + 1) // 2] row_second_half.reverse() lowerCamelCase_ : Dict =row_first_half + row_second_half result.append(__a ) return result def _snake_case ( ) -> None: from collections.abc import Callable from timeit import timeit def benchmark_a_function(lowerCamelCase__ : List[Any] , lowerCamelCase__ : str ) -> None: lowerCamelCase_ : List[str] =F"""{func.__name__}({value})""" lowerCamelCase_ : List[str] =timeit(F"""__main__.{call}""" , setup="import __main__" ) # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds") print(F"""{call:38} -- {timing:.4f} seconds""" ) for value in range(15 ): # (1, 7, 14): for func in (generate_pascal_triangle, generate_pascal_triangle_optimized): benchmark_a_function(__a , __a ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
144
"""Auto-close or nag stale GitHub issues on huggingface/transformers."""
from datetime import datetime as dt
import os

from github import Github

# Issues carrying any of these labels are never auto-closed or nagged.
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main() -> None:
    """Scan open issues; close long-stale bot-commented ones, nag the rest."""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Most recent comment first.
        comments = sorted(
            [comment for comment in issue.get_comments()],
            key=lambda i: i.created_at,
            reverse=True,
        )
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
97
0
"""Matthews correlation coefficient metric, exposed through the `datasets` Metric API."""
from sklearn.metrics import matthews_corrcoef

import datasets


_DESCRIPTION = """
Compute the Matthews correlation coefficient (MCC)

The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (list of int): Predicted labels, as returned by a model.
    references (list of int): Ground truth labels.
    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
    matthews_correlation (dict containing float): Matthews correlation.
Examples:
    Example 1, a basic example with only predictions and references as inputs:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                     predictions=[1, 2, 2, 0, 3, 3])
        >>> print(round(results['matthews_correlation'], 2))
        0.54

    Example 2, the same example as above, but also including sample weights:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                     predictions=[1, 2, 2, 0, 3, 3],
        ...                                     sample_weight=[0.5, 3, 1, 1, 1, 2])
        >>> print(round(results['matthews_correlation'], 2))
        0.1

    Example 3, the same example as above, but with sample weights that cause a negative correlation:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                     predictions=[1, 2, 2, 0, 3, 3],
        ...                                     sample_weight=[0.5, 1, 0, 0, 0, 1])
        >>> print(round(results['matthews_correlation'], 2))
        -0.25
"""

_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    """Thin `datasets.Metric` wrapper around sklearn's `matthews_corrcoef`."""

    def _info(self):
        # Metric metadata: int32 predictions/references, link to the sklearn docs.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        # sklearn expects (y_true, y_pred); wrap the numpy scalar in float().
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
"""Sentence splitting for pegasus-style ROUGE evaluation (requires nltk)."""
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    # Download the tokenizer data once; the lock guards multi-process runs.
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def a(x: str) -> str:
    """Return ``x`` with each sentence on its own line (ROUGE-L convention).

    Raises:
        AssertionError: if nltk is not installed.
    """
    x = re.sub("<n>", "", x)  # remove pegasus newline char; result must be kept
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
97
0
from __future__ import annotations import numpy as np def lowerCamelCase__ ( A__ : Any ): '''simple docstring''' return np.maximum(0 , __a ) if __name__ == "__main__": print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
12
"""Download a WMT translation dataset and dump it as seq2seq .source/.target files."""
from pathlib import Path

import fire
from tqdm import tqdm


def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download ``dataset`` for the ``src_lang``-``tgt_lang`` pair and save each split.

    Each split is written as ``<split>.source`` / ``<split>.target`` line files;
    the ``validation`` split is renamed ``val`` to match summarization datasets.

    Raises:
        ImportError: if the `datasets` package is not installed.
    """
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")


if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
97
0
from torch import nn


def UpperCAmelCase__(act_fn: str) -> nn.Module:
    """Map an activation-function name to a fresh ``torch.nn`` module instance.

    Supported names: "swish"/"silu" -> nn.SiLU, "mish" -> nn.Mish,
    "gelu" -> nn.GELU.

    Raises:
        ValueError: for any other name.
    """
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
188
"""Lazy-import module init for the DPT model family (transformers-style)."""
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


# Map of submodule name -> public names, consumed by _LazyModule below.
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
97
0
import math
import sys


def a_(number: int) -> int:
    """Return the minimum number of perfect squares that sum to ``number``.

    Bottom-up dynamic programming: answers[i] = 1 + min(answers[i - j*j]).
    Note: preserves the original convention that a_(0) == 1.

    Raises:
        ValueError: if ``number`` is negative or not a whole number.
    """
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
282
"""Hamming distance between two equal-length strings."""


def a(string_a: str, string_b: str) -> int:
    """Return the number of positions at which the two strings differ.

    Raises:
        ValueError: if the strings have different lengths.
    """
    if len(string_a) != len(string_b):
        raise ValueError("String lengths must match!")
    count = 0
    # Compare position by position; distinct loop variables are essential here.
    for char_a, char_b in zip(string_a, string_b):
        if char_a != char_b:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
97
0
"""English letter-frequency analysis (classic ETAOIN match scoring)."""
import string

# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def get_letter_count(message: str) -> "dict[str, int]":
    """Count occurrences of each uppercase letter in ``message`` (case-insensitive)."""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple):
    """Sort key helper: the first element of a pair."""
    return x[0]


def get_frequency_order(message: str) -> str:
    """Return the 26 letters ordered from most to least frequent in ``message``.

    Ties within a frequency bucket are broken by reverse ETAOIN order, matching
    the classic cryptanalysis formulation.
    """
    letter_to_freq = get_letter_count(message)
    # Bucket letters by their observed frequency.
    freq_to_letter: "dict[int, list[str]]" = {freq: [] for letter, freq in letter_to_freq.items()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: "dict[int, str]" = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    """Score 0-12: how many of the 6 most/least frequent letters match English's."""
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score


if __name__ == "__main__":
    import doctest

    doctest.testmod()
189
"""Catalan numbers via bottom-up dynamic programming, with an interactive driver."""


def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0)..C(upper_limit).

    Raises:
        ValueError: if ``upper_limit`` is negative.
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
97
0
"""Legacy seq2seq fine-tuning script built on SeqaSeqTrainer (summarization/translation)."""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeqaSeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    SeqaSeqDataCollator,
    SeqaSeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )


def handle_metrics(split, metrics, output_dir):
    """Log ``metrics`` for ``split`` and persist them as ``<split>_results.json``."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
    """Parse arguments, build model/datasets/trainer, then train/eval/predict as requested."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fpaa,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        # Forward matching trainer args onto the model config when present.
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = SeqaSeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=SeqaSeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs): entry point must accept the process index.
    main()


if __name__ == "__main__":
    main()
139
'''simple docstring'''
# Test-suite for datasets' JSON I/O (JsonDatasetReader / JsonDatasetWriter).
# NOTE(review): this dump is machine-renamed. Every test function is named
# `a` (later defs shadow earlier ones and pytest collects none of them),
# several defs repeat a parameter name (`__a` / `UpperCamelCase_`), which is
# a SyntaxError in Python, bodies reference names the mangling destroyed
# (`dataset`, `tmp_path`, `jsonl_path`, `_check_json_dataset`, ...), and the
# return annotations (`Optional`, `Any`, ...) are used without importing
# `typing`. Code is left token-identical; only comments were added.
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


# Originally `_check_json_dataset` (called under that, now-undefined, name
# below): sanity-checks a 4-row / 3-column dataset and its dtypes.
def a(__a, __a) -> Optional[int]:  # duplicate param name: SyntaxError
    '''simple docstring'''
    assert isinstance(__a, __a)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


# Reader honours keep_in_memory (memory must grow only when caching in RAM).
@pytest.mark.parametrize('''keep_in_memory''', [False, True])
def a(__a, __a, __a) -> Optional[Any]:
    '''simple docstring'''
    UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
    UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        UpperCamelCase__ :Tuple = JsonDatasetReader(__a, cache_dir=__a, keep_in_memory=__a).read()
    _check_json_dataset(__a, __a)


# Reader applies an explicit Features schema (or infers one when None).
@pytest.mark.parametrize(
    '''features''',
    [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ],
)
def a(__a, __a, __a) -> Any:
    '''simple docstring'''
    UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
    UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    UpperCamelCase__ :Optional[Any] = features.copy() if features else default_expected_features
    UpperCamelCase__ :Tuple = (
        Features({feature: Value(__a) for feature, dtype in features.items()}) if features is not None else None
    )
    UpperCamelCase__ :int = JsonDatasetReader(__a, features=__a, cache_dir=__a).read()
    _check_json_dataset(__a, __a)


# Column order of the requested Features must be preserved.
@pytest.mark.parametrize(
    '''features''',
    [
        None,
        {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
    ],
)
def a(__a, __a, __a) -> Tuple:
    '''simple docstring'''
    UpperCamelCase__ :int = tmp_path / '''cache'''
    UpperCamelCase__ :str = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
    UpperCamelCase__ :Any = features.copy() if features else default_expected_features
    UpperCamelCase__ :Union[str, Any] = (
        Features({feature: Value(__a) for feature, dtype in features.items()}) if features is not None else None
    )
    UpperCamelCase__ :Any = JsonDatasetReader(__a, features=__a, cache_dir=__a).read()
    assert isinstance(__a, __a)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


# Same ordering check driven directly from a features dict.
def a(__a, __a) -> List[Any]:
    '''simple docstring'''
    UpperCamelCase__ :Any = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
    UpperCamelCase__ :int = features.copy()
    UpperCamelCase__ :List[Any] = (
        Features({feature: Value(__a) for feature, dtype in features.items()}) if features is not None else None
    )
    UpperCamelCase__ :Optional[int] = tmp_path / '''cache'''
    UpperCamelCase__ :Dict = JsonDatasetReader(__a, features=__a, cache_dir=__a).read()
    assert isinstance(__a, __a)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


# The `split` argument selects the split name ("train" when None).
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train'''), '''train''', '''test'''])
def a(__a, __a, __a) -> List[Any]:
    '''simple docstring'''
    UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
    UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    UpperCamelCase__ :List[Any] = JsonDatasetReader(__a, cache_dir=__a, split=__a).read()
    _check_json_dataset(__a, __a)
    assert dataset.split == split if split else "train"


# Reader accepts a single path or a list of paths.
@pytest.mark.parametrize('''path_type''', [str, list])
def a(__a, __a, __a) -> Any:
    '''simple docstring'''
    if issubclass(__a, __a):
        UpperCamelCase__ :Union[str, Any] = jsonl_path
    elif issubclass(__a, __a):
        UpperCamelCase__ :int = [jsonl_path]
    UpperCamelCase__ :Dict = tmp_path / '''cache'''
    UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    UpperCamelCase__ :List[str] = JsonDatasetReader(__a, cache_dir=__a).read()
    _check_json_dataset(__a, __a)


# Originally `_check_json_datasetdict`: checks every requested split.
def a(__a, __a, __a=("train",)) -> Optional[Any]:
    '''simple docstring'''
    assert isinstance(__a, __a)
    for split in splits:
        UpperCamelCase__ :Optional[int] = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


# DatasetDict variant of the keep_in_memory test.
@pytest.mark.parametrize('''keep_in_memory''', [False, True])
def a(__a, __a, __a) -> List[str]:
    '''simple docstring'''
    UpperCamelCase__ :List[str] = tmp_path / '''cache'''
    UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        UpperCamelCase__ :str = JsonDatasetReader({'''train''': jsonl_path}, cache_dir=__a, keep_in_memory=__a).read()
    _check_json_datasetdict(__a, __a)


# DatasetDict variant of the explicit-Features test.
@pytest.mark.parametrize(
    '''features''',
    [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ],
)
def a(__a, __a, __a) -> int:
    '''simple docstring'''
    UpperCamelCase__ :Tuple = tmp_path / '''cache'''
    UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    UpperCamelCase__ :Optional[int] = features.copy() if features else default_expected_features
    UpperCamelCase__ :str = (
        Features({feature: Value(__a) for feature, dtype in features.items()}) if features is not None else None
    )
    UpperCamelCase__ :Dict = JsonDatasetReader({'''train''': jsonl_path}, features=__a, cache_dir=__a).read()
    _check_json_datasetdict(__a, __a)


# DatasetDict variant of the split-selection test.
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train'''), '''train''', '''test'''])
def a(__a, __a, __a) -> str:
    '''simple docstring'''
    if split:
        UpperCamelCase__ :List[str] = {split: jsonl_path}
    else:
        UpperCamelCase__ :int = '''train'''
        UpperCamelCase__ :int = {'''train''': jsonl_path, '''test''': jsonl_path}
    UpperCamelCase__ :Any = tmp_path / '''cache'''
    UpperCamelCase__ :Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    UpperCamelCase__ :Any = JsonDatasetReader(__a, cache_dir=__a).read()
    _check_json_datasetdict(__a, __a, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


# Originally `load_json`: parse a whole JSON document from a buffer.
def a(__a) -> Union[str, Any]:
    '''simple docstring'''
    return json.load(__a)


# Originally `load_json_lines`: parse JSON Lines from a buffer.
def a(__a) -> int:
    '''simple docstring'''
    return [json.loads(__a) for line in buffer]


# Writer tests. NOTE(review): the parametrize lists reference
# `load_json_lines` / `load_json`, which the renaming above destroyed.
class lowercase:
    """simple docstring"""

    @pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)])
    def lowerCAmelCase__(self, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_):
        '''simple docstring'''
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase_, UpperCamelCase_, lines=UpperCamelCase_).write()
            buffer.seek(0)
            UpperCamelCase__ :List[Any] = load_json_function(UpperCamelCase_)
        assert isinstance(UpperCamelCase_, UpperCamelCase_)
        assert isinstance(exported_content[0], UpperCamelCase_)
        assert len(UpperCamelCase_) == 10

    @pytest.mark.parametrize(
        '''orient, container, keys, len_at''',
        [
            ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
            ('''split''', dict, {'''columns''', '''data'''}, '''data'''),
            ('''index''', dict, set('''0123456789'''), None),
            ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
            ('''values''', list, None, None),
            ('''table''', dict, {'''schema''', '''data'''}, '''data'''),
        ],
    )
    def lowerCAmelCase__(self, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_):
        '''simple docstring'''
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase_, UpperCamelCase_, lines=UpperCamelCase_, orient=UpperCamelCase_).write()
            buffer.seek(0)
            UpperCamelCase__ :Optional[int] = load_json(UpperCamelCase_)
        assert isinstance(UpperCamelCase_, UpperCamelCase_)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(UpperCamelCase_, '''keys''') and not hasattr(exported_content[0], '''keys''')
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(UpperCamelCase_) == 10

    # Same as the first writer test but exercising multiprocess writing.
    @pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)])
    def lowerCAmelCase__(self, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_):
        '''simple docstring'''
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase_, UpperCamelCase_, lines=UpperCamelCase_, num_proc=2).write()
            buffer.seek(0)
            UpperCamelCase__ :Union[str, Any] = load_json_function(UpperCamelCase_)
        assert isinstance(UpperCamelCase_, UpperCamelCase_)
        assert isinstance(exported_content[0], UpperCamelCase_)
        assert len(UpperCamelCase_) == 10

    @pytest.mark.parametrize(
        '''orient, container, keys, len_at''',
        [
            ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
            ('''split''', dict, {'''columns''', '''data'''}, '''data'''),
            ('''index''', dict, set('''0123456789'''), None),
            ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
            ('''values''', list, None, None),
            ('''table''', dict, {'''schema''', '''data'''}, '''data'''),
        ],
    )
    def lowerCAmelCase__(self, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_):
        '''simple docstring'''
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase_, UpperCamelCase_, lines=UpperCamelCase_, orient=UpperCamelCase_, num_proc=2).write()
            buffer.seek(0)
            UpperCamelCase__ :int = load_json(UpperCamelCase_)
        assert isinstance(UpperCamelCase_, UpperCamelCase_)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(UpperCamelCase_, '''keys''') and not hasattr(exported_content[0], '''keys''')
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(UpperCamelCase_) == 10

    # num_proc=0 is invalid and must raise.
    def lowerCAmelCase__(self, UpperCamelCase_):
        '''simple docstring'''
        with pytest.raises(UpperCamelCase_):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(UpperCamelCase_, UpperCamelCase_, num_proc=0)

    # Compressed output must round-trip byte-identically via fsspec.
    @pytest.mark.parametrize('''compression, extension''', [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')])
    def lowerCAmelCase__(self, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_):
        '''simple docstring'''
        UpperCamelCase__ :Tuple = tmp_path_factory.mktemp('''data''') / F'''test.json.{extension}'''
        UpperCamelCase__ :Union[str, Any] = str(shared_datadir / F'''test_file.json.{extension}''')
        JsonDatasetWriter(UpperCamelCase_, UpperCamelCase_, compression=UpperCamelCase_).write()
        with fsspec.open(UpperCamelCase_, '''rb''', compression='''infer''') as f:
            UpperCamelCase__ :Dict = f.read()
        with fsspec.open(UpperCamelCase_, '''rb''', compression='''infer''') as f:
            UpperCamelCase__ :int = f.read()
        assert exported_content == original_content
97
0
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class __snake_case(BertTokenizerFast):
    """Fast tokenizer whose paired slow implementation is ``CustomTokenizer``.

    NOTE(review): the base class in the dump was the undefined name ``A__``;
    ``BertTokenizerFast`` is the only tokenizer class this module imports, so
    it is restored here — confirm against the upstream file.
    """

    # Presumably this was ``slow_tokenizer_class`` before machine-mangling
    # renamed it to ``_a``; the mangled name is kept so existing readers of
    # ``_a`` keep working. TODO(review): confirm the original attribute name.
    _a = CustomTokenizer
103
'''simple docstring'''
# Unit tests for accelerate's SageMaker ``_convert_nargs_to_dict`` helper.
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig):
    """Canned SageMaker launch configuration consumed by the tests below.

    NOTE(review): in the mangled dump this class and the test class were both
    named ``lowercase`` (the second shadowed the first) while the tests
    reference ``MockLaunchConfig`` — that name is restored here.  The base was
    the undefined ``A__``; ``SageMakerConfig`` is the only plausible imported
    base.  All field names except the last two had been collapsed to ``_a``;
    they are left that way (each assignment overwrites the previous one, as in
    the dump) because only the two ``*_training_script_args`` lists are read.
    """

    _a = ComputeEnvironment.AMAZON_SAGEMAKER
    _a = True
    _a = 'ml.p3.2xlarge'
    _a = 'accelerate_sagemaker_execution_role'
    _a = 'hf-sm'
    _a = 'us-east-1'
    _a = 1
    _a = 'accelerate-sagemaker-1'
    _a = '1.6'
    _a = '4.4'
    _a = 'train.py'
    # The two attributes below are referenced by name in the tests.
    success_training_script_args = [
        '--model_name_or_path',
        'bert',
        '--do_train',
        'False',
        '--epochs',
        '3',
        '--learning_rate',
        '5e-5',
        '--max_steps',
        '50.5',
    ]
    fail_training_script_args = [
        '--model_name_or_path',
        'bert',
        '--do_train',
        '--do_test',
        'False',
        '--do_predict',
        '--epochs',
        '3',
        '--learning_rate',
        '5e-5',
        '--max_steps',
        '50.5',
    ]


class lowercase(unittest.TestCase):
    """Tests for ``_convert_nargs_to_dict`` (mangled class name kept)."""

    def lowerCAmelCase__(self):
        '''Each CLI value should be converted to its natural Python type.'''
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        # NOTE(review): the dump asserted against the undefined name
        # ``UpperCamelCase_``; the expected types below are reconstructed from
        # the argument values ('bert'/'False'/'3'/'5e-5'/'50.5') — confirm
        # against accelerate's implementation.
        assert isinstance(converted_args['model_name_or_path'], str)
        assert isinstance(converted_args['do_train'], bool)
        assert isinstance(converted_args['epochs'], int)
        assert isinstance(converted_args['learning_rate'], float)
        assert isinstance(converted_args['max_steps'], float)
        # A boolean flag followed by another flag is malformed input; the
        # raised exception type is presumed ValueError — TODO confirm.
        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
97
0
'''simple docstring''' def _A ( snake_case ) -> List[Any]: _lowercase : Optional[int] = [], [] while len(__a ) > 1: _lowercase : Union[str, Any] = min(__a ), max(__a ) start.append(__a ) end.append(__a ) collection.remove(__a ) collection.remove(__a ) end.reverse() return start + collection + end if __name__ == "__main__": _snake_case = input('Enter numbers separated by a comma:\n').strip() _snake_case = [int(item) for item in user_input.split(',')] print(*merge_sort(unsorted), sep=',')
250
'''simple docstring'''
# Small torch/matplotlib helpers: parameter freezing, device selection,
# image display, and a wall-clock timestamp.
# NOTE(review): machine-renaming gave all four functions the name `a` (each
# def shadows the previous one, so only the last survives at import time)
# and destroyed the local/parameter names each body relies on (`module`,
# `device`, `fig`, `current_time`, `timestamp` are all undefined as written).
# Code is left token-identical; only comments were added.
from datetime import datetime

import matplotlib.pyplot as plt
import torch


# Originally a parameter-freezing helper: disables gradients on a module.
def a(__a) -> int:
    '''simple docstring'''
    for param in module.parameters():
        UpperCamelCase__ :Dict = False  # was: param.requires_grad = False — TODO confirm


# Originally a device-selection helper: prefers CUDA, then MPS, else CPU,
# and warns that MPS is known to break backpropagation.
def a() -> Union[str, Any]:
    '''simple docstring'''
    UpperCamelCase__ :List[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        UpperCamelCase__ :Optional[int] = '''mps'''
    if device == "mps":
        print(
            '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
            ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
            ''' with generations.'''
        )
    return device


# Originally an image-display helper: shows an image with axes hidden.
def a(__a) -> Any:
    '''simple docstring'''
    UpperCamelCase__ :Dict = plt.imshow(__a)
    fig.axes.get_xaxis().set_visible(__a)
    fig.axes.get_yaxis().set_visible(__a)
    plt.show()


# Originally a timestamp helper: returns the current time as HH:MM:SS.
def a() -> str:
    '''simple docstring'''
    UpperCamelCase__ :int = datetime.now()
    UpperCamelCase__ :str = current_time.strftime('''%H:%M:%S''')
    return timestamp
97
0
"""simple docstring""" import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets _UpperCamelCase: Optional[Any] = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n' _UpperCamelCase: List[str] = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n' _UpperCamelCase: List[str] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. 
If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): def lowercase ( self : str ) -> Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, homepage='https://github.com/krishnap25/mauve', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { 'predictions': datasets.Value('string', id='sequence' ), 'references': datasets.Value('string', id='sequence' ), } ), codebase_urls=['https://github.com/krishnap25/mauve'], reference_urls=[ 'https://arxiv.org/abs/2102.01454', 'https://github.com/krishnap25/mauve', ], ) def lowercase ( self : Dict, lowerCAmelCase : List[str], lowerCAmelCase : Dict, lowerCAmelCase : Tuple=None, lowerCAmelCase : str=None, lowerCAmelCase : Optional[int]=None, lowerCAmelCase : Dict=None, lowerCAmelCase : Any="auto", lowerCAmelCase : Optional[int]=-1, lowerCAmelCase : Any=0.9, lowerCAmelCase : Optional[Any]=5, lowerCAmelCase : Tuple=500, lowerCAmelCase : int="gpt2-large", lowerCAmelCase : List[str]=-1, lowerCAmelCase : Any=1024, lowerCAmelCase : Tuple=25, lowerCAmelCase : Dict=5, lowerCAmelCase : Dict=True, lowerCAmelCase : Union[str, Any]=25, ) -> str: lowercase : List[Any] = compute_mauve( p_text=UpperCamelCase_, 
q_text=UpperCamelCase_, p_features=UpperCamelCase_, q_features=UpperCamelCase_, p_tokens=UpperCamelCase_, q_tokens=UpperCamelCase_, num_buckets=UpperCamelCase_, pca_max_data=UpperCamelCase_, kmeans_explained_var=UpperCamelCase_, kmeans_num_redo=UpperCamelCase_, kmeans_max_iter=UpperCamelCase_, featurize_model_name=UpperCamelCase_, device_id=UpperCamelCase_, max_text_length=UpperCamelCase_, divergence_curve_discretization_size=UpperCamelCase_, mauve_scaling_factor=UpperCamelCase_, verbose=UpperCamelCase_, seed=UpperCamelCase_, ) return out
255
'''simple docstring'''
# `datasets` metric wrapper around scipy's Pearson correlation.
# NOTE(review): machine-renaming collapsed the three module constants onto
# the single name `__snake_case` (so `_DESCRIPTION`, `_KWARGS_DESCRIPTION`
# and `_CITATION`, referenced below, are undefined), gave both methods the
# name `lowerCAmelCase__`, and duplicated the `UpperCamelCase_` parameter
# name in `_compute`'s signature (a SyntaxError). The original newlines
# inside the long strings were also flattened to spaces by the dump. Code is
# left token-identical; only comments were added.
from scipy.stats import pearsonr

import datasets

# Originally `_DESCRIPTION`.
__snake_case = ''' Pearson correlation coefficient and p-value for testing non-correlation. The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. '''
# Originally `_KWARGS_DESCRIPTION`.
__snake_case = ''' Args: predictions (`list` of `int`): Predicted class labels, as returned by a model. references (`list` of `int`): Ground truth labels. return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`. Returns: pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation. p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities. Examples: Example 1-A simple example using only predictions and references. >>> pearsonr_metric = datasets.load_metric("pearsonr") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5]) >>> print(round(results[\'pearsonr\'], 2)) -0.74 Example 2-The same as Example 1, but that also returns the `p-value`. >>> pearsonr_metric = datasets.load_metric("pearsonr") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True) >>> print(sorted(list(results.keys()))) [\'p-value\', \'pearsonr\'] >>> print(round(results[\'pearsonr\'], 2)) -0.74 >>> print(round(results[\'p-value\'], 2)) 0.15 '''
# Originally `_CITATION` (SciPy 1.0 paper).
__snake_case = ''' @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, Ilhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Antonio H. and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } '''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowercase(datasets.Metric):
    """simple docstring"""

    # Originally `_info`: declares float predictions/references and the
    # scipy reference URL.
    def lowerCAmelCase__(self):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''float'''),
                    '''references''': datasets.Value('''float'''),
                }
            ),
            reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''],
        )

    # Originally `_compute`: returns the coefficient, plus the p-value when
    # requested. Duplicate `UpperCamelCase_` parameter names are a
    # SyntaxError introduced by the mangling.
    def lowerCAmelCase__(self, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_=False):
        '''simple docstring'''
        if return_pvalue:
            UpperCamelCase__ :Any = pearsonr(UpperCamelCase_, UpperCamelCase_)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(UpperCamelCase_, UpperCamelCase_)[0])}
97
0
"""simple docstring""" import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) SCREAMING_SNAKE_CASE : Any = logging.getLogger() def lowercase ( ) ->Optional[int]: """simple docstring""" __snake_case : List[Any] = argparse.ArgumentParser() parser.add_argument('''-f''' ) __snake_case : List[Any] = parser.parse_args() return args.f class _UpperCAmelCase ( A__ ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = logging.StreamHandler(sys.stdout ) logger.addHandler(UpperCamelCase_ ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Union[str, Any] = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , '''run_glue_deebert.py''' ) with patch.object(UpperCamelCase_ , '''argv''' , UpperCamelCase_ ): __snake_case : Tuple = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(UpperCamelCase_ , 0.666 ) @slow @require_torch_non_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = ''' --model_type roberta --model_name_or_path roberta-base --task_name MRPC --do_train --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --max_seq_length 128 --per_gpu_eval_batch_size=1 --per_gpu_train_batch_size=8 --learning_rate 2e-4 --num_train_epochs 3 --overwrite_output_dir --seed 42 --output_dir 
./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --save_steps 0 --overwrite_cache --eval_after_first_stage '''.split() self.run_and_check(UpperCamelCase_ ) __snake_case : Union[str, Any] = ''' --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --eval_each_highway --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 '''.split() self.run_and_check(UpperCamelCase_ ) __snake_case : str = ''' --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --early_exit_entropy 0.1 --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 '''.split() self.run_and_check(UpperCamelCase_ )
102
'''simple docstring'''
# Fast (tokenizers-backed) BlenderbotSmall tokenizer.
# NOTE(review): machine-renaming collapsed the four module constants onto
# `__snake_case` (so `VOCAB_FILES_NAMES` etc., referenced in the class body,
# are undefined), made the base class the undefined `A__` (presumably
# `PreTrainedTokenizerFast` from the import — TODO confirm), collapsed the
# class attributes onto `_a`, and duplicated parameter names in every method
# signature (a SyntaxError). Code is left token-identical; only comments
# were added.
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

__snake_case = logging.get_logger(__name__)

# Originally `VOCAB_FILES_NAMES`.
__snake_case = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}

# Originally `PRETRAINED_VOCAB_FILES_MAP`.
__snake_case = {
    '''vocab_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
    },
    '''merges_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
    },
    '''tokenizer_config_file''': {
        '''facebook/blenderbot_small-90M''': (
            '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
        )
    },
}

# Originally `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`.
__snake_case = {
    '''facebook/blenderbot_small-90M''': 512,
}


class lowercase(A__):
    """simple docstring"""

    _a = VOCAB_FILES_NAMES
    _a = PRETRAINED_VOCAB_FILES_MAP
    _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _a = BlenderbotSmallTokenizer

    # Wraps a ByteLevelBPETokenizer built from the given vocab/merges files.
    def __init__(self, UpperCamelCase_=None, UpperCamelCase_=None, UpperCamelCase_="<|endoftext|>", UpperCamelCase_="<|endoftext|>", UpperCamelCase_="<|endoftext|>", UpperCamelCase_=False, UpperCamelCase_=True, **UpperCamelCase_, ):
        '''simple docstring'''
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=UpperCamelCase_,
                merges=UpperCamelCase_,
                add_prefix_space=UpperCamelCase_,
                trim_offsets=UpperCamelCase_,
            ),
            bos_token=UpperCamelCase_,
            eos_token=UpperCamelCase_,
            unk_token=UpperCamelCase_,
            **UpperCamelCase_,
        )
        UpperCamelCase__ :Union[str, Any] = add_prefix_space

    # Originally `build_inputs_with_special_tokens`: wraps one or two
    # sequences in BOS/EOS markers.
    def lowerCAmelCase__(self, UpperCamelCase_, UpperCamelCase_=None):
        '''simple docstring'''
        UpperCamelCase__ :List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    # Originally `create_token_type_ids_from_sequences`: Blenderbot does not
    # use token types, so every position gets segment id 0.
    def lowerCAmelCase__(self, UpperCamelCase_, UpperCamelCase_=None):
        '''simple docstring'''
        UpperCamelCase__ :Optional[int] = [self.sep_token_id]
        UpperCamelCase__ :Any = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
97
0
"""simple docstring""" A__ : List[str] = { 'Pillow': 'Pillow', 'accelerate': 'accelerate>=0.11.0', 'compel': 'compel==0.1.8', 'black': 'black~=23.1', 'datasets': 'datasets', 'filelock': 'filelock', 'flax': 'flax>=0.4.1', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.13.2', 'requests-mock': 'requests-mock==1.10.0', 'importlib_metadata': 'importlib_metadata', 'invisible-watermark': 'invisible-watermark', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.2.8,!=0.3.2', 'jaxlib': 'jaxlib>=0.1.65', 'Jinja2': 'Jinja2', 'k-diffusion': 'k-diffusion>=0.0.12', 'torchsde': 'torchsde', 'note_seq': 'note_seq', 'librosa': 'librosa', 'numpy': 'numpy', 'omegaconf': 'omegaconf', 'parameterized': 'parameterized', 'protobuf': 'protobuf>=3.20.3,<4', 'pytest': 'pytest', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'ruff': 'ruff>=0.0.241', 'safetensors': 'safetensors', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'scipy': 'scipy', 'onnx': 'onnx', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'tensorboard': 'tensorboard', 'torch': 'torch>=1.4', 'torchvision': 'torchvision', 'transformers': 'transformers>=4.25.1', 'urllib3': 'urllib3<=2.0.0', }
144
'''simple docstring'''
# Package initializer for the Stable Diffusion ControlNet pipelines: exposes
# the torch pipelines when transformers+torch are installed (falling back to
# dummy placeholder objects otherwise) and the Flax pipeline when
# transformers+flax are installed.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Missing torch/transformers: export dummies that raise helpful errors.
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline

if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
97
0
'''simple docstring'''
# pytest configuration shared by the transformers examples test-suite.
import sys
import warnings
from os.path import abspath, dirname, join

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# NOTE(review): the dump assigned this path to `A__` and then inserted the
# undefined name `git_repo_path`; the single coherent name is restored.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)


def pytest_addoption(parser):
    """Register transformers' shared command-line options with pytest.

    NOTE(review): both hooks in the dump were named ``SCREAMING_SNAKE_CASE_``
    (the second shadowed the first, and neither is a pytest hook name); the
    standard ``pytest_addoption`` / ``pytest_terminal_summary`` hook names,
    implied by the shared helpers being delegated to, are restored.
    """
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """Emit transformers' extra test reports when ``--make-reports`` is set."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("""--make-reports""")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
276
'''simple docstring'''
# Dataset input stream that materializes a Dataset from a Python generator
# via the packaged `generator` builder.
# NOTE(review): machine-renaming made the base class the undefined `A__`
# (presumably `AbstractDatasetInputStream` from the import — TODO confirm),
# duplicated the `UpperCamelCase_` parameter name in `__init__` (a
# SyntaxError), and destroyed the local names (`self.builder`, `dataset`,
# the download kwargs) the bodies rely on. Code is left token-identical;
# only comments were added.
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class lowercase(A__):
    """simple docstring"""

    # Forwards stream options to the base class and builds the Generator
    # dataset builder with the given generator callable and kwargs.
    def __init__(self, UpperCamelCase_, UpperCamelCase_=None, UpperCamelCase_=None, UpperCamelCase_=False, UpperCamelCase_=False, UpperCamelCase_=None, UpperCamelCase_=None, **UpperCamelCase_, ):
        '''simple docstring'''
        super().__init__(
            features=UpperCamelCase_,
            cache_dir=UpperCamelCase_,
            keep_in_memory=UpperCamelCase_,
            streaming=UpperCamelCase_,
            num_proc=UpperCamelCase_,
            **UpperCamelCase_,
        )
        UpperCamelCase__ :Any = Generator(
            cache_dir=UpperCamelCase_,
            features=UpperCamelCase_,
            generator=UpperCamelCase_,
            gen_kwargs=UpperCamelCase_,
            **UpperCamelCase_,
        )

    # Originally `read`: streaming mode yields an IterableDataset; otherwise
    # the builder downloads/prepares and returns the map-style 'train' split.
    def lowerCAmelCase__(self):
        '''simple docstring'''
        if self.streaming:
            UpperCamelCase__ :Optional[Any] = self.builder.as_streaming_dataset(split='''train''')
        # Build regular (map-style) dataset
        else:
            UpperCamelCase__ :Optional[int] = None
            UpperCamelCase__ :int = None
            UpperCamelCase__ :Any = None
            UpperCamelCase__ :Any = None
            self.builder.download_and_prepare(
                download_config=UpperCamelCase_,
                download_mode=UpperCamelCase_,
                verification_mode=UpperCamelCase_,
                base_path=UpperCamelCase_,
                num_proc=self.num_proc,
            )
            UpperCamelCase__ :List[Any] = self.builder.as_dataset(
                split='''train''', verification_mode=UpperCamelCase_, in_memory=self.keep_in_memory
            )
        return dataset
97
0
"""Hub utilities: telemetry user-agent, model cards, cache migration, and model-file resolution."""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4  # NOTE: stdlib has no `uuida`; `uuid4` is the random-UUID factory

from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
    EntryNotFoundError,
    RepositoryNotFoundError,
    RevisionNotFoundError,
    is_jinja_available,
)
from packaging import version
from requests import HTTPError

from .. import __version__
from .constants import (
    DEPRECATED_REVISION_ARGS,
    DIFFUSERS_CACHE,
    HUGGINGFACE_CO_RESOLVE_ENDPOINT,
    SAFETENSORS_WEIGHTS_NAME,
    WEIGHTS_NAME,
)
from .import_utils import (
    ENV_VARS_TRUE_VALUES,
    _flax_version,
    _jax_version,
    _onnxruntime_version,
    _torch_version,
    is_flax_available,
    is_onnx_available,
    is_torch_available,
)
from .logging import get_logger


logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"


def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Format a user-agent string with basic info about the running environment."""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua


def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None) -> str:
    """Return ``namespace/model_id``, defaulting the namespace to the token owner."""
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"


def create_model_card(args, model_name):
    """Render and save a README.md model card from training ``args``.

    Raises:
        ValueError: if Jinja2 is not installed (required for template rendering).
    """
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    # Only the main process (rank -1 or 0) writes the card.
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)


def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None) -> Optional[str]:
    """Extract the commit hash from a cached-snapshot file path, if any."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None


# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")


def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    """Move blob files from the pre-v0.14 cache layout to the new cache location."""
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                # Keep the old path usable by pointing it at the moved file.
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).


cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            "the directory exists and can be written to."
        )


def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    """Insert ``variant`` before the file extension, e.g. ``a.bin`` -> ``a.fp16.bin``."""
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name


def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    """Resolve ``weights_name`` to a local file path, downloading from the Hub if needed.

    Raises:
        EnvironmentError: on any resolution failure (missing file/repo/revision,
            connection error, etc.).
    """
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
12
"""Adler-32 checksum, as defined in RFC 1950 (zlib), computed over character ordinals."""

MOD_ADLER = 65521  # largest prime smaller than 2**16


def a(plain_text: str) -> int:
    """Return the Adler-32 checksum of ``plain_text``.

    The running sums are reduced modulo ``MOD_ADLER`` each step, and the
    result packs ``b`` in the high 16 bits and ``a`` in the low 16 bits.

    >>> a("Wikipedia")
    300286872
    """
    a_sum = 1
    b_sum = 0
    for plain_chr in plain_text:
        a_sum = (a_sum + ord(plain_chr)) % MOD_ADLER
        b_sum = (b_sum + a_sum) % MOD_ADLER
    return (b_sum << 16) | a_sum
97
0
def floyd(n):
    """Print the upper half of the diamond: ``n`` right-aligned rows of stars."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond: rows shrink from ``n`` stars down to one."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full star diamond, or a friendly message when ``n`` is not positive."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
188
"""CamemBERT model configuration (RoBERTa architecture trained on French data)."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    """Configuration class for a CamemBERT model.

    Defaults mirror the BERT-base hyper-parameters; special token ids follow
    the RoBERTa convention (pad=1, bos=0, eos=2).
    """

    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    """ONNX export configuration for CamemBERT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
97
0
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Optimize ``search_prob`` via simulated annealing and return the best state found.

    Args:
        search_prob: initial ``SearchProblem`` state.
        find_max: maximize the score when True, minimize when False.
        max_x/min_x/max_y/min_y: bounds outside which neighbors are rejected.
        visualization: plot score vs. iteration with matplotlib when True.
        start_temperate: initial temperature.
        rate_of_decrease: fractional temperature decay per iteration.
        threshold_temp: stop once the temperature drops below this value.
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                # Accept a worse neighbor with probability e^(change / T).
                probability = (math.e) ** (change / current_temp)  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_fa(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    def test_fa(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )
282
"""Tests for the ConditionalDETR image processor."""
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ConditionalDetrImageProcessor


class ConditionalDetrImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters used to build image-processor test inputs."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # By default use the DETR-style resize targets.
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after resizing.

        Mirrors the processor's shortest-edge resize; for batched inputs the
        expected size is the per-axis maximum over the batch (padding target).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
97
0
from importlib import import_module

from .logging import get_logger


logger = get_logger(__name__)


class _PatchedModuleObj:
    """Set all the module's components as attributes of the _PatchedModuleObj object.

    Wraps a module (or another _PatchedModuleObj) so that individual attributes can be
    overridden without touching the real module object.
    """

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                # copy public attributes, plus any dunder explicitly requested via `attrs`
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        # keep a handle on the real module so nested patches can unwrap it
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Patch a submodule attribute of an object, by keeping all other submodules intact at all levels.

    Example::

        >>> import importlib
        >>> from datasets.load import dataset_module_factory
        >>> from datasets.streaming import patch_submodule, xjoin
        >>> dataset_module = dataset_module_factory("snli")
        >>> snli_module = importlib.import_module(dataset_module.module_path)
        >>> patch_submodule(snli_module, "os.path.join", xjoin).start()
        >>> assert snli_module.os.path.join is xjoin
    """

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj          # object whose attribute/submodule gets patched
        self.target = target    # dotted path, e.g. "os.path.join"
        self.new = new          # replacement value
        self.key = target.split(".")[0]
        self.original = {}      # attr name -> original value, for restoration on exit
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        # restore everything recorded during __enter__
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
189
"""Count the number of ways to assign tasks to persons using bitmask dynamic programming."""
from collections import defaultdict


class AssignmentUsingBitmask:
    """Counts the number of ways N tasks can be distributed so that every person gets
    exactly one task from their personal list of performable tasks.

    The DP state is (mask of persons already assigned, next task number to consider).
    """

    def __init__(self, task_performed, total):
        # total no of tasks (N)
        self.total_tasks = total

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        # stores the list of persons for each task
        self.task = defaultdict(list)

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        """Return the number of ways to complete the assignment given that persons in
        `mask` are already assigned and tasks < `task_no` have been considered."""
        # if all persons are assigned, this is one valid arrangement
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        """Populate the task -> persons mapping and return the total number of ways."""
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
97
0
"""Tests for the CLIPSeg processor (tokenizer + image processor composition)."""
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPSegProcessor, ViTImageProcessor


@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        """Write a tiny BPE vocab/merges and an image-processor config into a temp dir."""
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
            "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with a single random PIL image of shape 30x400x3."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
139
'''simple docstring''' import csv import tweepy # Twitter API credentials __snake_case = '''''' __snake_case = '''''' __snake_case = '''''' __snake_case = '''''' def a ( __a ) -> None: '''simple docstring''' UpperCamelCase__ :List[Any] = tweepy.OAuthHandler(__a , __a ) auth.set_access_token(__a , __a ) UpperCamelCase__ :List[str] = tweepy.API(__a ) # initialize a list to hold all the tweepy Tweets UpperCamelCase__ :Dict = [] # make initial request for most recent tweets (200 is the maximum allowed count) UpperCamelCase__ :Tuple = api.user_timeline(screen_name=__a , count=200 ) # save most recent tweets alltweets.extend(__a ) # save the id of the oldest tweet less one UpperCamelCase__ :Union[str, Any] = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(__a ) > 0: print(f'''getting tweets before {oldest}''' ) # all subsequent requests use the max_id param to prevent duplicates UpperCamelCase__ :Union[str, Any] = api.user_timeline( screen_name=__a , count=200 , max_id=__a ) # save most recent tweets alltweets.extend(__a ) # update the id of the oldest tweet less one UpperCamelCase__ :Tuple = alltweets[-1].id - 1 print(f'''...{len(__a )} tweets downloaded so far''' ) # transform the tweepy tweets into a 2D array that will populate the csv UpperCamelCase__ :int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(f'''new_{screen_name}_tweets.csv''' , '''w''' ) as f: UpperCamelCase__ :Tuple = csv.writer(__a ) writer.writerow(['''id''', '''created_at''', '''text'''] ) writer.writerows(__a ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets('''FirePing32''')
97
0
from datetime import datetime import matplotlib.pyplot as plt import torch def UpperCamelCase( __UpperCamelCase : Tuple ): for param in module.parameters(): lowerCAmelCase_ : Dict = False def UpperCamelCase( ): lowerCAmelCase_ : List[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu''' if torch.backends.mps.is_available() and torch.backends.mps.is_built(): lowerCAmelCase_ : Optional[int] = '''mps''' if device == "mps": print( '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch''' ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues''' ''' with generations.''' ) return device def UpperCamelCase( __UpperCamelCase : List[Any] ): lowerCAmelCase_ : Dict = plt.imshow(__a ) fig.axes.get_xaxis().set_visible(__a ) fig.axes.get_yaxis().set_visible(__a ) plt.show() def UpperCamelCase( ): lowerCAmelCase_ : int = datetime.now() lowerCAmelCase_ : str = current_time.strftime('''%H:%M:%S''' ) return timestamp
103
"""Convert Table Transformer checkpoints (PubTables-1M) to the HuggingFace format."""
import argparse
from collections import OrderedDict
from pathlib import Path

import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F

from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
        ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
    ]
)


def rename_key(state_dict, old, new):
    """Pop `old` from `state_dict` and re-insert its value under `new`."""
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    """Rename backbone keys from the original naming to the HF conv_encoder naming."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


def read_in_q_k_v(state_dict):
    """Split fused in_proj weights/biases into separate q/k/v projections (hidden size 256)."""
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]

    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def resize(image, checkpoint_url):
    """Resize `image` so its longest side is 800 (detection) or 1000 (structure recognition)."""
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


def normalize(image):
    """Convert a PIL image to a tensor and normalize with ImageNet mean/std."""
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image


@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Copy/paste/tweak the original Table Transformer weights into the HF structure,
    verify outputs against known expected values, then optionally save / push."""
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
97
0
"""Configuration for the (deprecated) TrajectoryTransformer model."""
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    """Stores the configuration of a TrajectoryTransformer model.

    Instantiating a configuration with the defaults yields a configuration similar to
    the CarlCochet/trajectory-transformer-halfcheetah-medium-v2 architecture.
    """

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
250
'''simple docstring''' from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def a ( __a ) -> bool: '''simple docstring''' UpperCamelCase__ :int = int(number**0.5 ) return number == sq * sq def a ( __a , __a , __a , __a , __a , __a ) -> tuple[int, int]: '''simple docstring''' UpperCamelCase__ :int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den UpperCamelCase__ :int = x_den * y_den * z_den UpperCamelCase__ :int = gcd(__a , __a ) top //= hcf bottom //= hcf return top, bottom def a ( __a = 35 ) -> int: '''simple docstring''' UpperCamelCase__ :set = set() UpperCamelCase__ :int UpperCamelCase__ :Fraction = Fraction(0 ) UpperCamelCase__ :tuple[int, int] for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 UpperCamelCase__ :int = x_num * y_den + x_den * y_num UpperCamelCase__ :Any = x_den * y_den UpperCamelCase__ :Tuple = gcd(__a , __a ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: UpperCamelCase__ :Tuple = add_three( __a , __a , __a , __a , __a , __a ) unique_s.add(__a ) # n=2 UpperCamelCase__ :List[str] = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) UpperCamelCase__ :Dict = x_den * x_den * y_den * y_den if is_sq(__a ) and is_sq(__a ): UpperCamelCase__ :Any = int(sqrt(__a ) ) UpperCamelCase__ :Optional[int] = int(sqrt(__a ) ) UpperCamelCase__ :int = gcd(__a , __a ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: UpperCamelCase__ :Tuple = add_three( __a , __a , __a , __a , __a , __a ) unique_s.add(__a ) # n=-1 UpperCamelCase__ :Tuple = x_num * y_num UpperCamelCase__ :Union[str, Any] = x_den * y_num + x_num * y_den UpperCamelCase__ :List[str] = gcd(__a , __a ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: UpperCamelCase__ :Union[str, Any] = add_three( __a , __a , __a , __a , __a , __a ) unique_s.add(__a ) # n=2 UpperCamelCase__ 
:Optional[Any] = x_num * x_num * y_num * y_num UpperCamelCase__ :Tuple = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(__a ) and is_sq(__a ): UpperCamelCase__ :str = int(sqrt(__a ) ) UpperCamelCase__ :Any = int(sqrt(__a ) ) UpperCamelCase__ :Dict = gcd(__a , __a ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: UpperCamelCase__ :int = add_three( __a , __a , __a , __a , __a , __a ) unique_s.add(__a ) for num, den in unique_s: total += Fraction(__a , __a ) return total.denominator + total.numerator if __name__ == "__main__": print(F"""{solution() = }""")
97
0