"""simple docstring""" def UpperCamelCase ( UpperCAmelCase ) ->list: """simple docstring""" if len(UpperCAmelCase ) <= 1: return lst a_ = 1 while i < len(UpperCAmelCase ): if lst[i - 1] <= lst[i]: i += 1 else: a_ , a_ = lst[i], lst[i - 1] i -= 1 if i == 0: a_ = 1 return lst if __name__ == "__main__": UpperCamelCase_ = input('Enter numbers separated by a comma:\n').strip() UpperCamelCase_ = [int(item) for item in user_input.split(',')] print(gnome_sort(unsorted))
"""simple docstring""" # Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]: """simple docstring""" a_ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, nicht wahr?", } # BLUE scores as follows: # "pair": [fairseq, transformers] a_ = { "wmt16-en-de-dist-12-1": [28.3, 27.52], "wmt16-en-de-dist-6-1": [27.4, 27.11], "wmt16-en-de-12-1": [26.9, 25.75], } a_ = F'''{src_lang}-{tgt_lang}''' a_ = F''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt16 - allenai license: apache-2.0 datasets: - wmt16 metrics: - bleu --- # FSMT ## Model description This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}. For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369). All 3 models are available: * [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1) * [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1) * [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "allenai/{model_name}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias ## Training data Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369). ## Eval results Here are the BLEU scores: model | fairseq | transformers -------|---------|---------- {model_name} | {scores[model_name][0]} | {scores[model_name][1]} The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs. The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=5 mkdir -p $DATA_DIR sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` ## Data Sources - [training, etc.](http://www.statmt.org/wmt16/) - [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372) ### BibTeX entry and citation info ``` @misc{{kasai2020deep, title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}}, author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. 
Smith}}, year={{2020}}, eprint={{2006.10369}}, archivePrefix={{arXiv}}, primaryClass={{cs.CL}} }} ``` ''' model_card_dir.mkdir(parents=UpperCAmelCase , exist_ok=UpperCAmelCase ) a_ = os.path.join(UpperCAmelCase , "README.md" ) print(F'''Generating {path}''' ) with open(UpperCAmelCase , "w" , encoding="utf-8" ) as f: f.write(UpperCAmelCase ) # make sure we are under the root of the project UpperCamelCase_ = Path(__file__).resolve().parent.parent.parent UpperCamelCase_ = repo_dir / 'model_cards' for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: UpperCamelCase_ = model_cards_dir / 'allenai' / model_name write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
"""simple docstring""" # HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers UpperCamelCase_ = float('nan') class snake_case : def __init__( self , __UpperCAmelCase) ->Optional[Any]: a_ = sys.stdout a_ = open(__UpperCAmelCase , "a") def __getattr__( self , __UpperCAmelCase) ->Any: return getattr(self.stdout , __UpperCAmelCase) def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Tuple: self.stdout.write(__UpperCAmelCase) # strip tqdm codes self.file.write(re.sub(r"^.*\r" , "" , __UpperCAmelCase , 0 , re.M)) def UpperCamelCase ( UpperCAmelCase=80 , UpperCAmelCase=False ) ->Optional[Any]: """simple docstring""" a_ = [] # deal with critical env vars a_ = ["CUDA_VISIBLE_DEVICES"] for key in env_keys: a_ = os.environ.get(UpperCAmelCase , UpperCAmelCase ) if val is not None: cmd.append(F'''{key}={val}''' ) # python executable (not always needed if the script is executable) a_ = sys.executable if full_python_path else sys.executable.split("/" )[-1] cmd.append(UpperCAmelCase ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes a_ = [] a_ = "" while len(UpperCAmelCase ) > 0: current_line += F'''{cmd.pop(0 )} ''' if len(UpperCAmelCase ) == 0 or len(UpperCAmelCase ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(UpperCAmelCase ) a_ = "" return "\\\n".join(UpperCAmelCase ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Tuple: """simple docstring""" a_ = re.sub(r"[\\\n]+" , " " , args.base_cmd ) # remove --output_dir if any and set our own a_ = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd ) args.base_cmd += F''' --output_dir {output_dir}''' # ensure we have --overwrite_output_dir a_ = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Any: """simple docstring""" if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )} , ) a_ = subprocess.run(UpperCAmelCase , capture_output=UpperCAmelCase , text=UpperCAmelCase ) if verbose: print("STDOUT" , result.stdout ) print("STDERR" , result.stderr ) # save the streams a_ = variation.replace(" " , "-" ) with open(Path(UpperCAmelCase ) / F'''log.{prefix}.stdout.txt''' , "w" ) as f: f.write(result.stdout ) with open(Path(UpperCAmelCase ) / F'''log.{prefix}.stderr.txt''' , "w" ) as f: f.write(result.stderr 
) if result.returncode != 0: if verbose: print("failed" ) return {target_metric_key: nan} with io.open(F'''{output_dir}/all_results.json''' , "r" , encoding="utf-8" ) as f: a_ = json.load(UpperCAmelCase ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) ->Optional[Any]: """simple docstring""" a_ = [] a_ = [] a_ = F'''{id}: {variation:<{longest_variation_len}}''' a_ = F'''{preamble}: ''' a_ = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(UpperCAmelCase ) , desc=UpperCAmelCase , leave=UpperCAmelCase ): a_ = process_run_single( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) a_ = single_run_metrics[target_metric_key] if not math.isnan(UpperCAmelCase ): metrics.append(UpperCAmelCase ) results.append(UpperCAmelCase ) outcome += "✓" else: outcome += "✘" a_ = F'''\33[2K\r{outcome}''' if len(UpperCAmelCase ) > 0: a_ = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} a_ = round(mean_metrics[target_metric_key] , 2 ) a_ = F'''{outcome} {mean_target}''' if len(UpperCAmelCase ) > 1: results_str += F''' {tuple(round(UpperCAmelCase , 2 ) for x in results )}''' print(UpperCAmelCase ) a_ = variation return mean_metrics else: print(UpperCAmelCase ) return {variation_key: variation, target_metric_key: nan} def UpperCamelCase ( ) ->Any: """simple docstring""" a_ = torch.cuda.get_device_properties(torch.device("cuda" ) ) return F''' Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )} Software: transformers: {transformers.__version__} torch : {torch.__version__} cuda : {torch.version.cuda} python : {platform.python_version()} Hardware: {torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB ''' def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[str]: """simple docstring""" a_ = pd.DataFrame(UpperCAmelCase ) a_ = "variation" a_ = "diff_%" a_ = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan a_ = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(UpperCAmelCase ): # as a fallback, use the minimal value as the sentinel a_ = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(UpperCAmelCase ): a_ = df.apply( lambda UpperCAmelCase : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis="columns" , ) # re-order columns a_ = [variation_key, target_metric_key, diff_key, *report_metric_keys] a_ = df.reindex(UpperCAmelCase , axis="columns" ) # reorder cols # capitalize a_ = df.rename(str.capitalize , axis="columns" ) # make the cols as narrow as possible a_ = df.rename(lambda UpperCAmelCase : c.replace("_" , "<br>" ) , axis="columns" ) a_ = df.rename(lambda UpperCAmelCase : c.replace("_" , "\n" ) , axis="columns" ) a_ = ["", "Copy between the cut-here-lines and paste as is to github or a forum"] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=UpperCAmelCase , floatfmt=".2f" )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line 
was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=UpperCAmelCase , floatfmt=".2f" )] print("\n\n".join(UpperCAmelCase ) ) def UpperCamelCase ( ) ->Optional[Any]: """simple docstring""" a_ = argparse.ArgumentParser() parser.add_argument( "--base-cmd" , default=UpperCAmelCase , type=UpperCAmelCase , required=UpperCAmelCase , help="Base cmd" , ) parser.add_argument( "--variations" , default=UpperCAmelCase , type=UpperCAmelCase , nargs="+" , required=UpperCAmelCase , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , ) parser.add_argument( "--base-variation" , default=UpperCAmelCase , type=UpperCAmelCase , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , ) parser.add_argument( "--target-metric-key" , default=UpperCAmelCase , type=UpperCAmelCase , required=UpperCAmelCase , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , ) parser.add_argument( "--report-metric-keys" , default="" , type=UpperCAmelCase , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples" , ) parser.add_argument( "--repeat-times" , default=1 , type=UpperCAmelCase , help="How many times to re-run each variation - an average will be reported" , ) parser.add_argument( "--output_dir" , default="output_benchmark" , type=UpperCAmelCase , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , ) parser.add_argument( "--verbose" , default=UpperCAmelCase , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , ) a_ = parser.parse_args() a_ = args.output_dir Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase ) a_ = get_base_command(UpperCAmelCase , UpperCAmelCase ) # split each dimension into its --foo variations a_ = [list(map(str.strip , re.split(r"\|" , UpperCAmelCase ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty a_ = list(map(str.strip , map(" ".join , itertools.product(*UpperCAmelCase ) ) ) ) a_ = max(len(UpperCAmelCase ) for x in variations ) # split wanted keys a_ = args.report_metric_keys.split() # capture prints into a log file for convenience a_ = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt''' print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' ) print(F'''and this script\'s output is also piped into {report_fn}''' ) a_ = Tee(UpperCAmelCase ) print(F'''\n*** Running {len(UpperCAmelCase )} benchmarks:''' ) print(F'''Base command: {" ".join(UpperCAmelCase )}''' ) a_ = "variation" a_ = [] for id, variation in enumerate(tqdm(UpperCAmelCase , desc="Total completion: " , leave=UpperCAmelCase ) ): a_ = base_cmd + variation.split() results.append( process_run( id + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , args.target_metric_key , UpperCAmelCase , args.repeat_times , UpperCAmelCase , args.verbose , ) ) process_results(UpperCAmelCase , args.target_metric_key , UpperCAmelCase , args.base_variation , UpperCAmelCase ) if __name__ == "__main__": main()
"""simple docstring""" def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int: """simple docstring""" if n == 0: return 1 elif n % 2 == 1: return (binary_exponentiation(UpperCAmelCase , n - 1 , UpperCAmelCase ) * a) % mod else: a_ = binary_exponentiation(UpperCAmelCase , n / 2 , UpperCAmelCase ) return (b * b) % mod # a prime number UpperCamelCase_ = 701 UpperCamelCase_ = 1000000000 UpperCamelCase_ = 10 # using binary exponentiation function, O(log(p)): print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p) print((a / b) % p == (a * b ** (p - 2)) % p)
"""simple docstring""" import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase_ = get_tests_dir('fixtures/test_sentencepiece_bpe.model') class snake_case ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : Union[str, Any] = BartphoTokenizer a_ : Optional[Any] = False a_ : Dict = True def UpperCAmelCase__ ( self) ->Optional[Any]: super().setUp() a_ = ["▁This", "▁is", "▁a", "▁t", "est"] a_ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase)))) a_ = {"unk_token": "<unk>"} a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"]) with open(self.monolingual_vocab_file , "w" , encoding="utf-8") as fp: for token in vocab_tokens: fp.write(F'''{token} {vocab_tokens[token]}\n''') a_ = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map) tokenizer.save_pretrained(self.tmpdirname) def UpperCAmelCase__ ( self , **__UpperCAmelCase) ->Tuple: kwargs.update(self.special_tokens_map) return BartphoTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase) def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[int]: a_ = "This is a là test" a_ = "This is a<unk><unk> test" return input_text, output_text def UpperCAmelCase__ ( self) ->int: a_ = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map) a_ = "This is a là test" a_ = "▁This ▁is ▁a ▁l à ▁t est".split() a_ = tokenizer.tokenize(__UpperCAmelCase) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase) a_ = tokens + [tokenizer.unk_token] a_ = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , __UpperCAmelCase)
"""simple docstring""" import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor UpperCamelCase_ = logging.get_logger(__name__) class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->None: warnings.warn( "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use YolosImageProcessor instead." , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase)
"""simple docstring""" import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self) ->Dict: a_ = inspect.getfile(accelerate.test_utils) a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"]) a_ = os.path.sep.join( mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]) a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"]) @require_multi_gpu def UpperCAmelCase__ ( self) ->Any: print(F'''Found {torch.cuda.device_count()} devices.''') a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path] with patch_environment(omp_num_threads=1): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy()) @require_multi_gpu def UpperCAmelCase__ ( self) ->str: print(F'''Found {torch.cuda.device_count()} devices.''') a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path] print(F'''Command: {cmd}''') with patch_environment(omp_num_threads=1): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy()) @require_multi_gpu def UpperCAmelCase__ ( self) ->Optional[int]: a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)] with patch_environment(omp_num_threads=1): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy()) @require_multi_gpu def UpperCAmelCase__ ( self) ->List[Any]: print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''') a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1"): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy()) if __name__ == "__main__": UpperCamelCase_ = Accelerator() UpperCamelCase_ = (accelerator.state.process_index + 2, 10) UpperCamelCase_ = torch.randint(0, 10, shape).to(accelerator.device) UpperCamelCase_ = '' UpperCamelCase_ = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." UpperCamelCase_ = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." UpperCamelCase_ = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
"""simple docstring""" from math import factorial def UpperCamelCase ( UpperCAmelCase = 20 ) ->int: """simple docstring""" a_ = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... a_ = n // 2 return int(factorial(UpperCAmelCase ) / (factorial(UpperCAmelCase ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(20)) else: try: UpperCamelCase_ = int(sys.argv[1]) print(solution(n)) except ValueError: print('Invalid entry - please enter a number.')
"""simple docstring""" from heapq import heappop, heappush import numpy as np def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) ->tuple[float | int, list[tuple[int, int]]]: """simple docstring""" a_ , a_ = grid.shape a_ = [-1, 1, 0, 0] a_ = [0, 0, -1, 1] if allow_diagonal: dx += [-1, -1, 1, 1] dy += [-1, 1, -1, 1] a_ , a_ = [(0, source)], set() a_ = np.full((rows, cols) , np.inf ) a_ = 0 a_ = np.empty((rows, cols) , dtype=UpperCAmelCase ) a_ = None while queue: ((a_) , (a_)) = heappop(UpperCAmelCase ) if (x, y) in visited: continue visited.add((x, y) ) if (x, y) == destination: a_ = [] while (x, y) != source: path.append((x, y) ) a_ , a_ = predecessors[x, y] path.append(UpperCAmelCase ) # add the source manually path.reverse() return matrix[destination], path for i in range(len(UpperCAmelCase ) ): a_ , a_ = x + dx[i], y + dy[i] if 0 <= nx < rows and 0 <= ny < cols: a_ = grid[nx][ny] if next_node == 1 and matrix[nx, ny] > dist + 1: heappush(UpperCAmelCase , (dist + 1, (nx, ny)) ) a_ = dist + 1 a_ = (x, y) return np.inf, [] if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase_ = { 'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'], 'feature_extraction_mctct': ['MCTCTFeatureExtractor'], 'processing_mctct': ['MCTCTProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ 'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST', 'MCTCTForCTC', 'MCTCTModel', 'MCTCTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" import numpy as np import torch from imwatermark import WatermarkEncoder # Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66 UpperCamelCase_ = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10 # bin(x)[2:] gives bits of x as str, use int to convert them to 0/1 UpperCamelCase_ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]] class snake_case : def __init__( self) ->Optional[int]: a_ = WATERMARK_BITS a_ = WatermarkEncoder() self.encoder.set_watermark("bits" , self.watermark) def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[int]: # can't encode images that are smaller than 256 if images.shape[-1] < 2_56: return images a_ = (2_55 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1).float().numpy() a_ = [self.encoder.encode(__UpperCAmelCase , "dwtDct") for image in images] a_ = torch.from_numpy(np.array(__UpperCAmelCase)).permute(0 , 3 , 1 , 2) a_ = torch.clamp(2 * (images / 2_55 - 0.5) , min=-1.0 , max=1.0) return images
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { 'facebook/deit-base-distilled-patch16-224': ( 'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json' ), # See all DeiT models at https://huggingface.co/models?filter=deit } class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : Optional[int] = """deit""" def __init__( self , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=2_24 , __UpperCAmelCase=16 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=16 , **__UpperCAmelCase , ) ->Dict: super().__init__(**__UpperCAmelCase) a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = initializer_range a_ = layer_norm_eps a_ = image_size a_ = patch_size a_ = num_channels a_ = qkv_bias a_ = encoder_stride class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : Dict = version.parse("""1.11""" ) @property def UpperCAmelCase__ ( self) ->Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ]) @property def UpperCAmelCase__ ( self) ->float: return 1E-4
"""simple docstring""" import math UpperCamelCase_ = 10 UpperCamelCase_ = 7 UpperCamelCase_ = BALLS_PER_COLOUR * NUM_COLOURS def UpperCamelCase ( UpperCAmelCase = 20 ) ->str: """simple docstring""" a_ = math.comb(UpperCAmelCase , UpperCAmelCase ) a_ = math.comb(NUM_BALLS - BALLS_PER_COLOUR , UpperCAmelCase ) a_ = NUM_COLOURS * (1 - missing_colour / total) return F'''{result:.9f}''' if __name__ == "__main__": print(solution(20))
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { 'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json', 'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json', 'xlm-roberta-large-finetuned-conll02-dutch': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json' ), 'xlm-roberta-large-finetuned-conll02-spanish': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json' ), 'xlm-roberta-large-finetuned-conll03-english': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json' ), 'xlm-roberta-large-finetuned-conll03-german': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json' ), } class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : str = """xlm-roberta""" def __init__( self , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) ->Union[str, Any]: super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase) a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = hidden_act a_ = intermediate_size a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = type_vocab_size a_ = initializer_range a_ = layer_norm_eps a_ = position_embedding_type a_ = use_cache a_ = classifier_dropout class snake_case ( SCREAMING_SNAKE_CASE_ ): @property def UpperCAmelCase__ ( self) ->Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": a_ = {0: "batch", 1: "choice", 2: "sequence"} else: a_ = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ])
"""simple docstring""" import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params UpperCamelCase_ = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ['memory_attention', 'encoder_attn'], ['attention', 'attn'], ['/', '.'], ['.LayerNorm.gamma', '_layer_norm.weight'], ['.LayerNorm.beta', '_layer_norm.bias'], ['r.layer_', 'r.layers.'], ['output_proj', 'out_proj'], ['ffn.dense_1.', 'fc2.'], ['ffn.dense.', 'fc1.'], ['ffn_layer_norm', 'final_layer_norm'], ['kernel', 'weight'], ['encoder_layer_norm.', 'encoder.layer_norm.'], ['decoder_layer_norm.', 'decoder.layer_norm.'], ['embeddings.weights', 'shared.weight'], ] def UpperCamelCase ( UpperCAmelCase ) ->Optional[Any]: """simple docstring""" for pegasus_name, hf_name in PATTERNS: a_ = k.replace(UpperCAmelCase , UpperCAmelCase ) return k def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->PegasusForConditionalGeneration: """simple docstring""" a_ = DEFAULTS.copy() cfg_kwargs.update(UpperCAmelCase ) a_ = PegasusConfig(**UpperCAmelCase ) a_ = PegasusForConditionalGeneration(UpperCAmelCase ) a_ = torch_model.model.state_dict() a_ = {} for k, v in tf_weights.items(): a_ = rename_state_dict_key(UpperCAmelCase ) if new_k not in sd: raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' ) if "dense" in k or "proj" in new_k: a_ = v.T a_ = torch.tensor(UpperCAmelCase , dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}''' # make sure embedding.padding_idx is respected a_ = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1] ) a_ = mapping["shared.weight"] a_ = mapping["shared.weight"] a_ = {k: torch.zeros_like(UpperCAmelCase ) for k, v in sd.items() if k.endswith("bias" ) and k not in mapping} mapping.update(**UpperCAmelCase ) a_ , a_ = torch_model.model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase ) a_ = [ k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"] ] assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}''' assert extra == [], F'''no matches found for the following tf keys {extra}''' return torch_model def UpperCamelCase ( UpperCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) ->Dict: """simple docstring""" a_ = tf.train.list_variables(UpperCAmelCase ) a_ = {} a_ = ["Adafactor", "global_step"] for name, shape in tqdm(UpperCAmelCase , desc="converting tf checkpoint to dict" ): a_ = any(pat in name for pat in ignore_name ) if skip_key: continue a_ = tf.train.load_variable(UpperCAmelCase , UpperCAmelCase ) a_ = array return tf_weights def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" a_ = Path(UpperCAmelCase ).parent.name a_ = task_specific_params[F'''summarization_{dataset}''']["max_position_embeddings"] a_ = PegasusTokenizer.from_pretrained("sshleifer/pegasus" , model_max_length=UpperCAmelCase ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(UpperCAmelCase ) # convert model a_ = get_tf_weights_as_numpy(UpperCAmelCase ) a_ = task_specific_params[F'''summarization_{dataset}'''] if dataset == "large": a_ = task_specific_params a_ = convert_pegasus(UpperCAmelCase 
, UpperCAmelCase ) torch_model.save_pretrained(UpperCAmelCase ) a_ = torch_model.state_dict() sd.pop("model.decoder.embed_positions.weight" ) sd.pop("model.encoder.embed_positions.weight" ) torch.save(UpperCAmelCase , Path(UpperCAmelCase ) / "pytorch_model.bin" ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.') UpperCamelCase_ = parser.parse_args() if args.save_dir is None: UpperCamelCase_ = Path(args.tf_ckpt_path).parent.name UpperCamelCase_ = os.path.join('pegasus', dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
"""simple docstring""" import math def UpperCamelCase ( UpperCAmelCase ) ->bool: """simple docstring""" a_ = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 ) return exponent == int(UpperCAmelCase ) def UpperCamelCase ( UpperCAmelCase = 1 / 12_345 ) ->int: """simple docstring""" a_ = 0 a_ = 0 a_ = 3 while True: a_ = (integer**2 - 1) / 4 # if candidate is an integer, then there is a partition for k if partition_candidate == int(UpperCAmelCase ): a_ = int(UpperCAmelCase ) total_partitions += 1 if check_partition_perfect(UpperCAmelCase ): perfect_partitions += 1 if perfect_partitions > 0: if perfect_partitions / total_partitions < max_proportion: return int(UpperCAmelCase ) integer += 1 if __name__ == "__main__": print(F"""{solution() = }""")
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class snake_case : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=50 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=None , ) ->Dict: a_ = parent a_ = batch_size a_ = seq_length a_ = is_training a_ = use_input_mask a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = initializer_range a_ = use_labels a_ = scope def UpperCAmelCase__ ( self) ->Any: a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a_ = None if self.use_input_mask: a_ = random_attention_mask([self.batch_size, self.seq_length]) if self.use_labels: a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a_ = self.get_config() return config, input_ids, input_mask, token_labels def UpperCAmelCase__ ( self) ->Optional[Any]: return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self) ->List[str]: ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) = self.prepare_config_and_inputs() a_ = True a_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->str: a_ = BertGenerationEncoder(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase) a_ = model(__UpperCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->Union[str, Any]: a_ = True a_ = BertGenerationEncoder(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , 
encoder_hidden_states=__UpperCAmelCase , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->List[str]: a_ = True a_ = True a_ = BertGenerationDecoder(config=__UpperCAmelCase).to(__UpperCAmelCase).eval() # first forward pass a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , ) a_ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids a_ = ids_tensor((self.batch_size, 3) , config.vocab_size) a_ = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and a_ = torch.cat([input_ids, next_tokens] , dim=-1) a_ = torch.cat([input_mask, next_mask] , dim=-1) a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0] a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0] # select random slice a_ = ids_tensor((1,) , output_from_past.shape[-1]).item() a_ = output_from_no_past[:, -3:, random_slice_idx].detach() a_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , ) ->Tuple: a_ = BertGenerationDecoder(__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCAmelCase__ ( self) ->str: a_ , a_ , a_ , a_ = self.prepare_config_and_inputs() a_ = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : List[str] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () a_ : Optional[int] = (BertGenerationDecoder,) if is_torch_available() else () a_ : List[Any] = ( {"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder} if is_torch_available() else {} ) def UpperCAmelCase__ ( self) ->List[Any]: a_ = BertGenerationEncoderTester(self) a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37) def UpperCAmelCase__ ( self) ->Optional[Any]: self.config_tester.run_common_tests() def UpperCAmelCase__ ( self) ->Tuple: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Tuple: a_ , a_ , a_ , a_ = self.model_tester.prepare_config_and_inputs() a_ = "bert" self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) def UpperCAmelCase__ ( self) ->int: a_ = 
self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->List[str]: a_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Optional[int]: # This regression test was failing with PyTorch < 1.3 ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() a_ = None self.model_tester.create_and_check_model_as_decoder( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) def UpperCAmelCase__ ( self) ->List[Any]: a_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase) @slow def UpperCAmelCase__ ( self) ->str: a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") self.assertIsNotNone(__UpperCAmelCase) @require_torch class snake_case ( unittest.TestCase ): @slow def UpperCAmelCase__ ( self) ->int: a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]]) with torch.no_grad(): a_ = model(__UpperCAmelCase)[0] a_ = torch.Size([1, 8, 10_24]) self.assertEqual(output.shape , __UpperCAmelCase) a_ = torch.tensor( [[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4)) @require_torch class snake_case ( unittest.TestCase ): @slow def UpperCAmelCase__ ( self) ->List[str]: a_ = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]]) with torch.no_grad(): a_ = model(__UpperCAmelCase)[0] a_ = torch.Size([1, 8, 5_03_58]) self.assertEqual(output.shape , __UpperCAmelCase) a_ = torch.tensor( [[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
"""simple docstring""" import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip UpperCamelCase_ = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def UpperCamelCase ( UpperCAmelCase ) ->Optional[Any]: """simple docstring""" if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[str]: """simple docstring""" return max(metric_fn(UpperCAmelCase , UpperCAmelCase ) for gt in ground_truths ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int: """simple docstring""" a_ = [line.strip() for line in open(UpperCAmelCase , "r" ).readlines()] a_ = [] if args.gold_data_mode == "qa": a_ = pd.read_csv(UpperCAmelCase , sep="\t" , header=UpperCAmelCase ) for answer_list in data[1]: a_ = ast.literal_eval(UpperCAmelCase ) answers.append(UpperCAmelCase ) else: a_ = [line.strip() for line in open(UpperCAmelCase , "r" ).readlines()] a_ = [[reference] for reference in references] a_ = a_ = a_ = 0 for prediction, ground_truths in zip(UpperCAmelCase , UpperCAmelCase ): total += 1 em += metric_max_over_ground_truths(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) fa += metric_max_over_ground_truths(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) a_ = 100.0 * em / total a_ = 100.0 * fa / total logger.info(F'''F1: {fa:.2f}''' ) logger.info(F'''EM: {em:.2f}''' ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[str]: """simple docstring""" a_ = args.k a_ = [line.strip() for line in open(UpperCAmelCase , "r" ).readlines()] a_ = [line.strip() for line in open(UpperCAmelCase , "r" ).readlines()] a_ = a_ = 0 for hypo, reference in zip(UpperCAmelCase , UpperCAmelCase ): a_ = set(hypo.split("\t" )[:k] ) a_ = set(reference.split("\t" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k a_ = 100.0 * em / total logger.info(F'''Precision@{k}: {em: .2f}''' ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Optional[Any]: """simple docstring""" def strip_title(UpperCAmelCase ): if title.startswith("\"" ): a_ = title[1:] if title.endswith("\"" ): a_ = title[:-1] return title a_ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( UpperCAmelCase , return_tensors="pt" , padding=UpperCAmelCase , truncation=UpperCAmelCase , )["input_ids"].to(args.device ) a_ = rag_model.rag.question_encoder(UpperCAmelCase ) a_ = question_enc_outputs[0] a_ = rag_model.retriever( UpperCAmelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , ) a_ = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) a_ = [] for docs in all_docs: a_ = [strip_title(UpperCAmelCase ) for title in docs["title"]] provenance_strings.append("\t".join(UpperCAmelCase ) ) return provenance_strings def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Optional[int]: """simple docstring""" with 
torch.no_grad(): a_ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( UpperCAmelCase , return_tensors="pt" , padding=UpperCAmelCase , truncation=UpperCAmelCase ) a_ = inputs_dict.input_ids.to(args.device ) a_ = inputs_dict.attention_mask.to(args.device ) a_ = rag_model.generate( # rag_model overwrites generate UpperCAmelCase , attention_mask=UpperCAmelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=UpperCAmelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) a_ = rag_model.retriever.generator_tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase ) if args.print_predictions: for q, a in zip(UpperCAmelCase , UpperCAmelCase ): logger.info("Q: {} - A: {}".format(UpperCAmelCase , UpperCAmelCase ) ) return answers def UpperCamelCase ( ) ->int: """simple docstring""" a_ = argparse.ArgumentParser() parser.add_argument( "--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=UpperCAmelCase , help=( "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the" " model_name_or_path" ) , ) parser.add_argument( "--index_name" , default=UpperCAmelCase , choices=["exact", "compressed", "legacy"] , type=UpperCAmelCase , help="RAG model retriever type" , ) parser.add_argument( "--index_path" , default=UpperCAmelCase , type=UpperCAmelCase , help="Path to the retrieval index" , ) parser.add_argument("--n_docs" , default=5 , type=UpperCAmelCase , help="Number of retrieved docs" ) parser.add_argument( "--model_name_or_path" , default=UpperCAmelCase , type=UpperCAmelCase , required=UpperCAmelCase , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , ) parser.add_argument( "--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=UpperCAmelCase , help=( "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates" " precision@k." ) , ) parser.add_argument("--k" , default=1 , type=UpperCAmelCase , help="k for the precision@k calculation" ) parser.add_argument( "--evaluation_set" , default=UpperCAmelCase , type=UpperCAmelCase , required=UpperCAmelCase , help="Path to a file containing evaluation samples" , ) parser.add_argument( "--gold_data_path" , default=UpperCAmelCase , type=UpperCAmelCase , required=UpperCAmelCase , help="Path to a tab-separated file with gold samples" , ) parser.add_argument( "--gold_data_mode" , default="qa" , type=UpperCAmelCase , choices=["qa", "ans"] , help=( "Format of the gold data file" "qa - a single line in the following format: question [tab] answer_list" "ans - a single line of the gold file contains the expected answer string" ) , ) parser.add_argument( "--predictions_path" , type=UpperCAmelCase , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , ) parser.add_argument( "--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , ) parser.add_argument( "--eval_batch_size" , default=8 , type=UpperCAmelCase , help="Batch size per GPU/CPU for evaluation." 
, ) parser.add_argument( "--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , ) parser.add_argument( "--num_beams" , default=4 , type=UpperCAmelCase , help="Number of beams to be used when generating answers" , ) parser.add_argument("--min_length" , default=1 , type=UpperCAmelCase , help="Min length of the generated answers" ) parser.add_argument("--max_length" , default=50 , type=UpperCAmelCase , help="Max length of the generated answers" ) parser.add_argument( "--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , ) parser.add_argument( "--print_docs" , action="store_true" , help="If True, prints docs retried while generating." , ) a_ = parser.parse_args() a_ = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) return args def UpperCamelCase ( UpperCAmelCase ) ->List[Any]: """simple docstring""" a_ = {} if args.model_type is None: a_ = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("rag" ): a_ = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration a_ = args.n_docs if args.index_name is not None: a_ = args.index_name if args.index_path is not None: a_ = args.index_path else: a_ = BartForConditionalGeneration a_ = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("Evaluate the following checkpoints: %s" , UpperCAmelCase ) a_ = get_scores if args.eval_mode == "e2e" else get_precision_at_k a_ = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) ) score_fn(UpperCAmelCase , args.predictions_path , args.gold_data_path ) continue logger.info("***** Running evaluation for {} *****".format(UpperCAmelCase ) ) logger.info(" Batch size = %d" , args.eval_batch_size ) logger.info(" Predictions will be stored under {}".format(args.predictions_path ) ) if args.model_type.startswith("rag" ): a_ = RagRetriever.from_pretrained(UpperCAmelCase , **UpperCAmelCase ) a_ = model_class.from_pretrained(UpperCAmelCase , retriever=UpperCAmelCase , **UpperCAmelCase ) model.retriever.init_retrieval() else: a_ = model_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase ) model.to(args.device ) with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file: a_ = [] for line in tqdm(UpperCAmelCase ): questions.append(line.strip() ) if len(UpperCAmelCase ) == args.eval_batch_size: a_ = evaluate_batch_fn(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) preds_file.write("\n".join(UpperCAmelCase ) + "\n" ) preds_file.flush() a_ = [] if len(UpperCAmelCase ) > 0: a_ = evaluate_batch_fn(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) preds_file.write("\n".join(UpperCAmelCase ) ) preds_file.flush() score_fn(UpperCAmelCase , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": UpperCamelCase_ = get_args() main(args)
303
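# A minimal end-to-end invocation sketch for the RAG evaluation script above; the
# file name eval_rag.py and the data paths are assumptions for illustration, but
# every flag matches the argparse definitions in the script.
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold.tsv \
#       --gold_data_mode qa \
#       --predictions_path path/to/preds.txt \
#       --eval_mode e2e \
#       --n_docs 5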
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger(__name__) def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" if "resnet-50" in model_name: a_ = ResNetConfig.from_pretrained("microsoft/resnet-50" ) elif "resnet-101" in model_name: a_ = ResNetConfig.from_pretrained("microsoft/resnet-101" ) else: raise ValueError("Model name should include either resnet50 or resnet101" ) a_ = DetrConfig(use_timm_backbone=UpperCAmelCase , backbone_config=UpperCAmelCase ) # set label attributes a_ = "panoptic" in model_name if is_panoptic: a_ = 250 else: a_ = 91 a_ = "huggingface/label-files" a_ = "coco-detection-id2label.json" a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) ) a_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()} a_ = idalabel a_ = {v: k for k, v in idalabel.items()} return config, is_panoptic def UpperCamelCase ( UpperCAmelCase ) ->List[str]: """simple docstring""" a_ = [] # stem # fmt: off rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") ) rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") ) rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") ) rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") ) rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") ) # stages for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): # shortcut if layer_idx == 0: rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''', ) ) # 3 convs for i in range(3 ): rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''', 
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''', ) ) # fmt: on for i in range(config.encoder_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''', ) ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') ) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''', ) ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append( 
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ] ) return rename_keys def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" a_ = state_dict.pop(UpperCAmelCase ) a_ = val def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False ) ->Optional[Any]: """simple docstring""" a_ = "" if is_panoptic: a_ = "detr." # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict a_ = in_proj_weight[:256, :] a_ = in_proj_bias[:256] a_ = in_proj_weight[256:512, :] a_ = in_proj_bias[256:512] a_ = in_proj_weight[-256:, :] a_ = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' ) a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict a_ = in_proj_weight[:256, :] a_ = in_proj_bias[:256] a_ = in_proj_weight[256:512, :] a_ = in_proj_bias[256:512] a_ = in_proj_weight[-256:, :] a_ = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention a_ = state_dict.pop( F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' ) a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) of cross-attention to the state dict a_ = in_proj_weight_cross_attn[:256, :] a_ = in_proj_bias_cross_attn[:256] a_ = in_proj_weight_cross_attn[256:512, :] a_ = in_proj_bias_cross_attn[256:512] a_ = in_proj_weight_cross_attn[-256:, :] a_ = in_proj_bias_cross_attn[-256:] def UpperCamelCase ( ) ->Dict: """simple docstring""" a_ = 
"http://images.cocodataset.org/val2017/000000039769.jpg" a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) return im @torch.no_grad() def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=False ) ->List[str]: """simple docstring""" a_ , a_ = get_detr_config(UpperCAmelCase ) # load original model from torch hub a_ = { "detr-resnet-50": "detr_resnet50", "detr-resnet-101": "detr_resnet101", } logger.info(F'''Converting model {model_name}...''' ) a_ = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=UpperCAmelCase ).eval() a_ = detr.state_dict() # rename keys for src, dest in create_rename_keys(UpperCAmelCase ): if is_panoptic: a_ = "detr." + src rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # query, key and value matrices need special treatment read_in_q_k_v(UpperCAmelCase , is_panoptic=UpperCAmelCase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them a_ = "detr.model." if is_panoptic else "model." for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): a_ = state_dict.pop(UpperCAmelCase ) a_ = val elif "class_labels_classifier" in key or "bbox_predictor" in key: a_ = state_dict.pop(UpperCAmelCase ) a_ = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: a_ = state_dict.pop(UpperCAmelCase ) a_ = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): a_ = state_dict.pop(UpperCAmelCase ) a_ = val # finally, create HuggingFace model and load state dict a_ = DetrForSegmentation(UpperCAmelCase ) if is_panoptic else DetrForObjectDetection(UpperCAmelCase ) model.load_state_dict(UpperCAmelCase ) model.eval() # verify our conversion on an image a_ = "coco_panoptic" if is_panoptic else "coco_detection" a_ = DetrImageProcessor(format=UpperCAmelCase ) a_ = processor(images=prepare_img() , return_tensors="pt" ) a_ = encoding["pixel_values"] a_ = detr(UpperCAmelCase ) a_ = model(UpperCAmelCase ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase ) model.save_pretrained(UpperCAmelCase ) processor.save_pretrained(UpperCAmelCase ) if push_to_hub: # Upload model and image processor to the hub logger.info("Uploading PyTorch model and image processor to the hub..." ) model.push_to_hub(F'''nielsr/{model_name}''' ) processor.push_to_hub(F'''nielsr/{model_name}''' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument( '--model_name', default='detr-resnet-50', type=str, choices=['detr-resnet-50', 'detr-resnet-101'], help='Name of the DETR model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' 
) parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.') UpperCamelCase_ = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
303
1
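# A short inference sketch mirroring the verification step in the DETR conversion
# script above; "facebook/detr-resnet-50" is an existing hub checkpoint, and a
# local --pytorch_dump_folder_path works the same way.
import requests
import torch
from PIL import Image
from transformers import DetrForObjectDetection, DetrImageProcessor

processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.logits.shape)  # (batch, num_queries, num_labels + 1)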
"""simple docstring""" import argparse import os import torch from transformers import ( XLNetConfig, XLNetForQuestionAnswering, XLNetForSequenceClassification, XLNetLMHeadModel, load_tf_weights_in_xlnet, ) from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging UpperCamelCase_ = { 'cola': 2, 'mnli': 3, 'mrpc': 2, 'sst-2': 2, 'sts-b': 1, 'qqp': 2, 'qnli': 2, 'rte': 2, 'wnli': 2, } logging.set_verbosity_info() def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None ) ->Any: """simple docstring""" a_ = XLNetConfig.from_json_file(UpperCAmelCase ) a_ = finetuning_task.lower() if finetuning_task is not None else "" if finetuning_task in GLUE_TASKS_NUM_LABELS: print(F'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' ) a_ = finetuning_task a_ = GLUE_TASKS_NUM_LABELS[finetuning_task] a_ = XLNetForSequenceClassification(UpperCAmelCase ) elif "squad" in finetuning_task: a_ = finetuning_task a_ = XLNetForQuestionAnswering(UpperCAmelCase ) else: a_ = XLNetLMHeadModel(UpperCAmelCase ) # Load weights from tf checkpoint load_tf_weights_in_xlnet(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # Save pytorch-model a_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) a_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) print(F'''Save PyTorch model to {os.path.abspath(UpperCAmelCase )}''' ) torch.save(model.state_dict() , UpperCAmelCase ) print(F'''Save configuration file to {os.path.abspath(UpperCAmelCase )}''' ) with open(UpperCAmelCase , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--xlnet_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained XLNet model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the folder to store the PyTorch model or dataset/vocab.', ) parser.add_argument( '--finetuning_task', default=None, type=str, help='Name of a task on which the XLNet TensorFlow model was fine-tuned', ) UpperCamelCase_ = parser.parse_args() print(args) convert_xlnet_checkpoint_to_pytorch( args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task )
303
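# A minimal invocation sketch for the conversion script above; the file name and
# paths are assumptions for illustration, the flags match the argparse definitions.
#
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/xlnet_model.ckpt \
#       --xlnet_config_file /path/to/xlnet_config.json \
#       --pytorch_dump_folder_path /path/to/out \
#       --finetuning_task sts-b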
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def UpperCamelCase ( UpperCAmelCase ) ->Tuple: """simple docstring""" a_ = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2] a_ = True if "large" in model_name or "huge" in model_name else False a_ = True if "large" in model_name or "huge" in model_name else False a_ = True if "large" in model_name or "huge" in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: a_ = [3, 3, 3, 3] a_ = [5, 5, 5, 5] elif "fl4" in model_name: a_ = [4, 4, 4, 4] a_ = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: a_ = [3, 3, 3, 3] if "lrf" in model_name: a_ = [3, 3, 3, 3] else: a_ = [2, 2, 2, 2] if "tiny" in model_name: a_ = 96 elif "small" in model_name: a_ = 96 elif "base" in model_name: a_ = 128 elif "large" in model_name: a_ = 192 elif "xlarge" in model_name: a_ = 256 elif "huge" in model_name: a_ = 352 # set label information a_ = "huggingface/label-files" if "large" in model_name or "huge" in model_name: a_ = "imagenet-22k-id2label.json" else: a_ = "imagenet-1k-id2label.json" a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) ) a_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()} a_ = {v: k for k, v in idalabel.items()} a_ = FocalNetConfig( embed_dim=UpperCAmelCase , depths=UpperCAmelCase , focal_levels=UpperCAmelCase , focal_windows=UpperCAmelCase , use_conv_embed=UpperCAmelCase , idalabel=UpperCAmelCase , labelaid=UpperCAmelCase , use_post_layernorm=UpperCAmelCase , use_layerscale=UpperCAmelCase , ) return config def UpperCamelCase ( UpperCAmelCase ) ->Any: """simple docstring""" if "patch_embed.proj" in name: a_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: a_ = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: a_ = "encoder." + name if "encoder.layers" in name: a_ = name.replace("encoder.layers" , "encoder.stages" ) if "downsample.proj" in name: a_ = name.replace("downsample.proj" , "downsample.projection" ) if "blocks" in name: a_ = name.replace("blocks" , "layers" ) if "modulation.f.weight" in name or "modulation.f.bias" in name: a_ = name.replace("modulation.f" , "modulation.projection_in" ) if "modulation.h.weight" in name or "modulation.h.bias" in name: a_ = name.replace("modulation.h" , "modulation.projection_context" ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: a_ = name.replace("modulation.proj" , "modulation.projection_out" ) if name == "norm.weight": a_ = "layernorm.weight" if name == "norm.bias": a_ = "layernorm.bias" if "head" in name: a_ = name.replace("head" , "classifier" ) else: a_ = "focalnet." 
+ name return name def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) ->Dict: """simple docstring""" a_ = { "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth", "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth", "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth", "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth", "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth", "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth", "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth", "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth", "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth", "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth", } # fmt: on a_ = model_name_to_url[model_name] print("Checkpoint URL: " , UpperCAmelCase ) a_ = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location="cpu" )["model"] # rename keys for key in state_dict.copy().keys(): a_ = state_dict.pop(UpperCAmelCase ) a_ = val a_ = get_focalnet_config(UpperCAmelCase ) a_ = FocalNetForImageClassification(UpperCAmelCase ) model.eval() # load state dict model.load_state_dict(UpperCAmelCase ) # verify conversion a_ = "http://images.cocodataset.org/val2017/000000039769.jpg" a_ = BitImageProcessor( do_resize=UpperCAmelCase , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase , crop_size=224 , do_normalize=UpperCAmelCase , image_mean=UpperCAmelCase , image_std=UpperCAmelCase , ) a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) a_ = processor(images=UpperCAmelCase , return_tensors="pt" ) a_ = transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) a_ = image_transforms(UpperCAmelCase ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , UpperCAmelCase , atol=1E-4 ) a_ = model(**UpperCAmelCase ) a_ = outputs.logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) print("First values of logits:" , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": a_ = torch.tensor([0.2166, -0.4368, 0.2191] ) elif model_name == "focalnet-tiny-lrf": a_ = torch.tensor([1.1669, 0.0125, -0.1695] ) elif model_name == "focalnet-small": a_ = torch.tensor([0.4917, -0.0430, 0.1341] ) elif model_name == "focalnet-small-lrf": a_ = torch.tensor([-0.2588, -0.5342, -0.2331] ) elif model_name == "focalnet-base": a_ = torch.tensor([-0.1655, -0.4090, -0.1730] ) elif model_name == "focalnet-base-lrf": a_ = torch.tensor([0.5306, -0.0483, -0.3928] ) assert torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1E-4 ) print("Looks ok!" 
) if pytorch_dump_folder_path is not None: print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCAmelCase ) processor.save_pretrained(UpperCAmelCase ) if push_to_hub: print(F'''Pushing model and processor of {model_name} to the hub...''' ) model.push_to_hub(F'''{model_name}''' ) processor.push_to_hub(F'''{model_name}''' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='focalnet-tiny', type=str, help='Name of the FocalNet model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub.', ) UpperCamelCase_ = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
303
1
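# A short classification sketch with the classes used above; "microsoft/focalnet-tiny"
# is assumed to be the hub id of a converted checkpoint, and id2label is the
# standard config attribute for the label map.
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, FocalNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])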
"""simple docstring""" import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=64 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=1 , ) ->Any: a_ = parent a_ = batch_size a_ = seq_length a_ = is_training a_ = use_input_mask a_ = use_token_type_ids a_ = use_labels a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = type_vocab_size a_ = type_sequence_label_size a_ = initializer_range a_ = num_labels a_ = num_choices a_ = scope a_ = q_groups a_ = k_groups a_ = v_groups a_ = post_attention_groups a_ = intermediate_groups a_ = output_groups def UpperCAmelCase__ ( self) ->Union[str, Any]: a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a_ = None if self.use_input_mask: a_ = random_attention_mask([self.batch_size, self.seq_length]) a_ = None a_ = None a_ = None if self.use_labels: a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) a_ = ids_tensor([self.batch_size] , self.num_choices) a_ = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self) ->int: return SqueezeBertConfig( embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , ) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Dict: a_ = SqueezeBertModel(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase , __UpperCAmelCase) a_ = 
model(__UpperCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Any: a_ = SqueezeBertForMaskedLM(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Union[str, Any]: a_ = SqueezeBertForQuestionAnswering(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Tuple: a_ = self.num_labels a_ = SqueezeBertForSequenceClassification(__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Dict: a_ = self.num_labels a_ = SqueezeBertForTokenClassification(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->List[str]: a_ = self.num_choices a_ = SqueezeBertForMultipleChoice(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() a_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def UpperCAmelCase__ ( self) ->Any: a_ = self.prepare_config_and_inputs() ((a_) , (a_) , (a_) , (a_) , (a_) , (a_)) = config_and_inputs a_ = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : Optional[Any] = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) a_ : Union[str, Any] = ( { """feature-extraction""": SqueezeBertModel, """fill-mask""": SqueezeBertForMaskedLM, """question-answering""": SqueezeBertForQuestionAnswering, """text-classification""": SqueezeBertForSequenceClassification, """token-classification""": SqueezeBertForTokenClassification, """zero-shot""": 
SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) a_ : List[Any] = False a_ : Tuple = True a_ : Optional[int] = False def UpperCAmelCase__ ( self) ->Optional[Any]: a_ = SqueezeBertModelTester(self) a_ = ConfigTester(self , config_class=__UpperCAmelCase , dim=37) def UpperCAmelCase__ ( self) ->Tuple: self.config_tester.run_common_tests() def UpperCAmelCase__ ( self) ->Tuple: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->List[str]: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->List[Any]: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Any: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->str: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Tuple: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*__UpperCAmelCase) @slow def UpperCAmelCase__ ( self) ->int: for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ = SqueezeBertModel.from_pretrained(__UpperCAmelCase) self.assertIsNotNone(__UpperCAmelCase) @require_sentencepiece @require_tokenizers @require_torch class snake_case ( unittest.TestCase ): @slow def UpperCAmelCase__ ( self) ->Union[str, Any]: a_ = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli") a_ = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]]) a_ = model(__UpperCAmelCase)[0] a_ = torch.Size((1, 3)) self.assertEqual(output.shape , __UpperCAmelCase) a_ = torch.tensor([[0.6_401, -0.0_349, -0.6_041]]) self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-4))
303
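# The module above is a standard transformers-style unittest suite; a typical
# invocation (the file path is an assumption):
#
#   python -m pytest tests/models/squeezebert/test_modeling_squeezebert.py -v
#
# The @slow-marked integration test only runs when RUN_SLOW=1 is set in the
# environment.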
"""simple docstring""" import os import numpy import onnx def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->List[str]: """simple docstring""" a_ = a.name a_ = b.name a_ = "" a_ = "" a_ = a == b a_ = name_a a_ = name_b return res def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]: """simple docstring""" for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCAmelCase , UpperCAmelCase ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase , UpperCAmelCase ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Dict: """simple docstring""" for n in graph_proto.node: _node_replace_input_with(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int: """simple docstring""" a_ = list(model.graph.initializer ) a_ = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i a_ = inits[i].name a_ = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase ) def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" a_ = os.path.dirname(UpperCAmelCase ) a_ = os.path.basename(UpperCAmelCase ) a_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) ) a_ = list(model.graph.initializer ) a_ = set() a_ = {} a_ = [] a_ = 0 for i in range(len(UpperCAmelCase ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCAmelCase ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCAmelCase ) dup_set.add(UpperCAmelCase ) a_ = inits[j].data_type a_ = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print("unexpected data type: " , UpperCAmelCase ) total_reduced_size += mem_size a_ = inits[i].name a_ = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCAmelCase ) else: a_ = [name_j] ind_to_replace.append((j, i) ) print("total reduced size: " , total_reduced_size / 1_024 / 1_024 / 1_024 , "GB" ) a_ = sorted(UpperCAmelCase ) _remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) a_ = "optimized_" + model_file_name a_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) onnx.save(UpperCAmelCase , UpperCAmelCase ) return new_model
303
1
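# Minimal usage sketch for remove_dup_initializers above; the module name
# reduce_onnx_size and the .onnx path are assumptions for illustration.
from reduce_onnx_size import remove_dup_initializers

optimized_path = remove_dup_initializers("model.onnx")
print("Deduplicated model written to:", optimized_path)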
"""simple docstring""" UpperCamelCase_ = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []} UpperCamelCase_ = ['a', 'b', 'c', 'd', 'e'] def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Tuple: """simple docstring""" a_ = start # add current to visited visited.append(UpperCAmelCase ) a_ = edges[current] for neighbor in neighbors: # if neighbor not in visited, visit if neighbor not in visited: a_ = topological_sort(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # if all neighbors visited add current to sort sort.append(UpperCAmelCase ) # if all vertices haven't been visited select a new one to visit if len(UpperCAmelCase ) != len(UpperCAmelCase ): for vertice in vertices: if vertice not in visited: a_ = topological_sort(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # return sort return sort if __name__ == "__main__": UpperCamelCase_ = topological_sort('a', [], []) print(sort)
303
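# Worked example for topological_sort above (assumes the definitions from the
# module above are in scope): children are appended before their parents, so the
# call should return the reverse of a conventional topological order.
result = topological_sort('a', [], [])
assert result == ['c', 'd', 'e', 'b', 'a']
print(list(reversed(result)))  # ['a', 'b', 'e', 'd', 'c'] -- parents-first order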
"""simple docstring""" import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , __UpperCAmelCase = "▁" , __UpperCAmelCase = True , __UpperCAmelCase = "<unk>" , __UpperCAmelCase = "</s>" , __UpperCAmelCase = "<pad>" , ) ->str: a_ = { "pad": {"id": 0, "token": pad_token}, "eos": {"id": 1, "token": eos_token}, "unk": {"id": 2, "token": unk_token}, } a_ = [None] * len(self.special_tokens) for token_dict in self.special_tokens.values(): a_ = token_dict["token"] a_ = Tokenizer(Unigram()) a_ = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}") , " "), normalizers.Lowercase(), ]) a_ = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase), pre_tokenizers.Digits(individual_digits=__UpperCAmelCase), pre_tokenizers.Punctuation(), ]) a_ = decoders.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase) a_ = TemplateProcessing( single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , ) a_ = { "model": "SentencePieceUnigram", "replacement": replacement, "add_prefix_space": add_prefix_space, } super().__init__(__UpperCAmelCase , __UpperCAmelCase) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = 80_00 , __UpperCAmelCase = True , ) ->Optional[Any]: a_ = trainers.UnigramTrainer( vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , ) if isinstance(__UpperCAmelCase , __UpperCAmelCase): a_ = [files] self._tokenizer.train(__UpperCAmelCase , trainer=__UpperCAmelCase) self.add_unk_id() def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = 80_00 , __UpperCAmelCase = True , ) ->int: a_ = trainers.UnigramTrainer( vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , ) self._tokenizer.train_from_iterator(__UpperCAmelCase , trainer=__UpperCAmelCase) self.add_unk_id() def UpperCAmelCase__ ( self) ->Union[str, Any]: a_ = json.loads(self._tokenizer.to_str()) a_ = self.special_tokens["unk"]["id"] a_ = Tokenizer.from_str(json.dumps(__UpperCAmelCase))
303
1
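# A minimal sketch of the same training flow using the tokenizers primitives the
# class above builds on; the corpus and hyperparameters are illustrative, not
# from the source.
from tokenizers import Tokenizer, trainers
from tokenizers.models import Unigram

tokenizer = Tokenizer(Unigram())
trainer = trainers.UnigramTrainer(
    vocab_size=100,  # illustrative; far smaller than the 8000 default above
    special_tokens=["<pad>", "</s>", "<unk>"],
    unk_token="<unk>",
    show_progress=False,
)
corpus = ["this is a tiny corpus", "used only to illustrate training"]
tokenizer.train_from_iterator(corpus, trainer=trainer)
print(tokenizer.encode("a tiny corpus").tokens)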
"""simple docstring""" from scipy.stats import spearmanr import datasets UpperCamelCase_ = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n' UpperCamelCase_ = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n' UpperCamelCase_ = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): def UpperCAmelCase__ ( self) ->Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("float"), "references": datasets.Value("float"), }) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , ) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False) ->Optional[int]: a_ = spearmanr(__UpperCAmelCase , __UpperCAmelCase) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
303
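# The metric's own docstring above already carries the canonical usage; a compact
# runnable version of its first example:
import datasets

spearmanr_metric = datasets.load_metric("spearmanr")
results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
print(results)  # {'spearmanr': -0.7}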
"""simple docstring""" from urllib.parse import quote import pytest from datasets.utils.hub import hf_hub_url @pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] ) @pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] ) @pytest.mark.parametrize("revision" , [None, "v2"] ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int: """simple docstring""" a_ = hf_hub_url(repo_id=UpperCAmelCase , path=UpperCAmelCase , revision=UpperCAmelCase ) assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(UpperCAmelCase )}'''
303
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase_ = { 'configuration_clap': [ 'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST', 'ClapAudioConfig', 'ClapConfig', 'ClapTextConfig', ], 'processing_clap': ['ClapProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ 'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST', 'ClapModel', 'ClapPreTrainedModel', 'ClapTextModel', 'ClapTextModelWithProjection', 'ClapAudioModel', 'ClapAudioModelWithProjection', ] UpperCamelCase_ = ['ClapFeatureExtractor'] if TYPE_CHECKING: from .configuration_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioConfig, ClapConfig, ClapTextConfig, ) from .processing_clap import ClapProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clap import ClapFeatureExtractor from .modeling_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioModel, ClapAudioModelWithProjection, ClapModel, ClapPreTrainedModel, ClapTextModel, ClapTextModelWithProjection, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
303
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { 'MIT/ast-finetuned-audioset-10-10-0.4593': ( 'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json' ), } class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : Tuple = """audio-spectrogram-transformer""" def __init__( self , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=10 , __UpperCAmelCase=10_24 , __UpperCAmelCase=1_28 , **__UpperCAmelCase , ) ->str: super().__init__(**__UpperCAmelCase) a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = initializer_range a_ = layer_norm_eps a_ = patch_size a_ = qkv_bias a_ = frequency_stride a_ = time_stride a_ = max_length a_ = num_mel_bins
303
1
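# A quick sketch instantiating the configuration above with its defaults; ASTConfig
# is the public transformers name for this class.
from transformers import ASTConfig

config = ASTConfig()
print(config.hidden_size, config.num_mel_bins, config.max_length)  # 768 128 1024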
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { 'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json', } class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : List[Any] = """git_vision_model""" def __init__( self , __UpperCAmelCase=7_68 , __UpperCAmelCase=30_72 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3 , __UpperCAmelCase=2_24 , __UpperCAmelCase=16 , __UpperCAmelCase="quick_gelu" , __UpperCAmelCase=1E-5 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , **__UpperCAmelCase , ) ->int: super().__init__(**__UpperCAmelCase) a_ = hidden_size a_ = intermediate_size a_ = num_hidden_layers a_ = num_attention_heads a_ = num_channels a_ = patch_size a_ = image_size a_ = initializer_range a_ = attention_dropout a_ = layer_norm_eps a_ = hidden_act @classmethod def UpperCAmelCase__ ( cls , __UpperCAmelCase , **__UpperCAmelCase) ->"PretrainedConfig": cls._set_token_in_kwargs(__UpperCAmelCase) a_ , a_ = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase) # get the vision config dict if we are loading from GITConfig if config_dict.get("model_type") == "git": a_ = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type") and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''') return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase) class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : int = """git""" def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=7_68 , __UpperCAmelCase=6 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10_24 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=0 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=1_01 , __UpperCAmelCase=1_02 , __UpperCAmelCase=None , **__UpperCAmelCase , ) ->Optional[int]: super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , **__UpperCAmelCase) if vision_config is None: a_ = {} logger.info("vision_config is None. initializing the GitVisionConfig with default values.") a_ = GitVisionConfig(**__UpperCAmelCase) a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = hidden_act a_ = intermediate_size a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = initializer_range a_ = layer_norm_eps a_ = position_embedding_type a_ = use_cache a_ = tie_word_embeddings a_ = num_image_with_embedding a_ = bos_token_id a_ = eos_token_id def UpperCAmelCase__ ( self) ->Tuple: a_ = copy.deepcopy(self.__dict__) a_ = self.vision_config.to_dict() a_ = self.__class__.model_type return output
303
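# A short sketch mirroring the nested-config handling above; GitConfig and
# GitVisionConfig are the public transformers names for these classes, and the
# hyperparameter values are illustrative.
from transformers import GitConfig, GitVisionConfig

vision_config = GitVisionConfig(image_size=224, patch_size=16)
config = GitConfig(vision_config=vision_config.to_dict())
print(config.to_dict()["vision_config"]["image_size"])  # 224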
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { 'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json', 'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json', 'xlm-roberta-large-finetuned-conll02-dutch': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json' ), 'xlm-roberta-large-finetuned-conll02-spanish': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json' ), 'xlm-roberta-large-finetuned-conll03-english': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json' ), 'xlm-roberta-large-finetuned-conll03-german': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json' ), } class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : str = """xlm-roberta""" def __init__( self , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) ->Union[str, Any]: super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase) a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = hidden_act a_ = intermediate_size a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = type_vocab_size a_ = initializer_range a_ = layer_norm_eps a_ = position_embedding_type a_ = use_cache a_ = classifier_dropout class snake_case ( SCREAMING_SNAKE_CASE_ ): @property def UpperCAmelCase__ ( self) ->Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": a_ = {0: "batch", 1: "choice", 2: "sequence"} else: a_ = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ])
303
1
"""simple docstring""" from __future__ import annotations def UpperCamelCase ( UpperCAmelCase ) ->int: """simple docstring""" for i in range(1 , len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 , len(UpperCAmelCase ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 , len(UpperCAmelCase ) ): for j in range(1 , len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
303
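# Worked example for min_path_sum above (assumes the function from the module
# above is in scope; grid values are illustrative): the cheapest right/down path
# is 1 -> 3 -> 1 -> 1 -> 1, with total cost 7.
grid = [
    [1, 3, 1],
    [1, 5, 1],
    [4, 2, 1],
]
assert min_path_sum(grid) == 7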
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class snake_case : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=24 , __UpperCAmelCase=2 , __UpperCAmelCase=6 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=10_00 , ) ->List[str]: a_ = parent a_ = batch_size a_ = seq_length a_ = is_training a_ = use_input_mask a_ = use_token_type_ids a_ = use_labels a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = type_vocab_size a_ = type_sequence_label_size a_ = initializer_range a_ = num_labels a_ = scope a_ = range_bbox def UpperCAmelCase__ ( self) ->int: a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: a_ = bbox[i, j, 3] a_ = bbox[i, j, 1] a_ = t if bbox[i, j, 2] < bbox[i, j, 0]: a_ = bbox[i, j, 2] a_ = bbox[i, j, 0] a_ = t a_ = None if self.use_input_mask: a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) a_ = None if self.use_token_type_ids: a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) a_ = None a_ = None if self.use_labels: a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) a_ = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def UpperCAmelCase__ ( self) ->List[str]: return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Any: a_ = LiltModel(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase) a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , 
token_type_ids=__UpperCAmelCase) a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Union[str, Any]: a_ = self.num_labels a_ = LiltForTokenClassification(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model( __UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Dict: a_ = LiltForQuestionAnswering(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model( __UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def UpperCAmelCase__ ( self) ->str: a_ = self.prepare_config_and_inputs() ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) = config_and_inputs a_ = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : List[Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) a_ : List[str] = ( { """feature-extraction""": LiltModel, """question-answering""": LiltForQuestionAnswering, """text-classification""": LiltForSequenceClassification, """token-classification""": LiltForTokenClassification, """zero-shot""": LiltForSequenceClassification, } if is_torch_available() else {} ) a_ : Any = False a_ : Dict = False def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int: return True def UpperCAmelCase__ ( self) ->str: a_ = LiltModelTester(self) a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37) def UpperCAmelCase__ ( self) ->List[Any]: self.config_tester.run_common_tests() def UpperCAmelCase__ ( self) ->Tuple: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Dict: a_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a_ = type self.model_tester.create_and_check_model(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->List[str]: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->str: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase) @slow def UpperCAmelCase__ ( self) ->List[Any]: for model_name in 
LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ = LiltModel.from_pretrained(__UpperCAmelCase) self.assertIsNotNone(__UpperCAmelCase) @require_torch @slow class snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self) ->List[Any]: a_ = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(__UpperCAmelCase) a_ = torch.tensor([[1, 2]] , device=__UpperCAmelCase) a_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__UpperCAmelCase) # forward pass with torch.no_grad(): a_ = model(input_ids=__UpperCAmelCase , bbox=__UpperCAmelCase) a_ = torch.Size([1, 2, 7_68]) a_ = torch.tensor( [[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=__UpperCAmelCase , ) self.assertTrue(outputs.last_hidden_state.shape , __UpperCAmelCase) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __UpperCAmelCase , atol=1E-3))
303
1
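A minimal usage sketch for the grid routine at the top of this row; the grid values and the expected cost are illustrative assumptions, not part of the dataset. The function folds path costs into the grid in place and returns the cheapest top-left-to-bottom-right cost when moving only right or down.

# Hypothetical example grid (not from the dataset row above).
grid = [
    [1, 3, 1],
    [1, 5, 1],
    [4, 2, 1],
]
assert UpperCamelCase(grid) == 7  # cheapest path: 1 -> 3 -> 1 -> 1 -> 1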
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging UpperCamelCase_ = logging.get_logger(__name__) # TODO: upload to AWS UpperCamelCase_ = { 'yjernite/retribert-base-uncased': ( 'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json' ), } class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : List[Any] = """retribert""" def __init__( self , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=7_68 , __UpperCAmelCase=8 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=True , __UpperCAmelCase=1_28 , __UpperCAmelCase=0 , **__UpperCAmelCase , ) ->int: super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase) a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = hidden_act a_ = intermediate_size a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = type_vocab_size a_ = initializer_range a_ = layer_norm_eps a_ = share_encoders a_ = projection_dim
303
"""simple docstring""" from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case ( SCREAMING_SNAKE_CASE_ ): def UpperCAmelCase__ ( self) ->Any: a_ = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(__UpperCAmelCase , "embed_dim")) self.parent.assertTrue(hasattr(__UpperCAmelCase , "num_heads")) class snake_case : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=64 , __UpperCAmelCase=3 , __UpperCAmelCase=[16, 48, 96] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=2 , ) ->Optional[int]: a_ = parent a_ = batch_size a_ = image_size a_ = patch_sizes a_ = patch_stride a_ = patch_padding a_ = is_training a_ = use_labels a_ = num_labels a_ = num_channels a_ = embed_dim a_ = num_heads a_ = stride_kv a_ = depth a_ = cls_token a_ = attention_drop_rate a_ = initializer_range a_ = layer_norm_eps def UpperCAmelCase__ ( self) ->Any: a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) a_ = None if self.use_labels: # create a random int32 tensor of given shape a_ = ids_tensor([self.batch_size] , self.num_labels) a_ = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self) ->Union[str, Any]: return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[Any]: a_ = TFCvtModel(config=__UpperCAmelCase) a_ = model(__UpperCAmelCase , training=__UpperCAmelCase) a_ = (self.image_size, self.image_size) a_ , a_ = image_size[0], image_size[1] for i in range(len(self.depth)): a_ = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1) a_ = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str: a_ = self.num_labels a_ = TFCvtForImageClassification(__UpperCAmelCase) a_ = model(__UpperCAmelCase , labels=__UpperCAmelCase , 
training=__UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def UpperCAmelCase__ ( self) ->Tuple: a_ = self.prepare_config_and_inputs() a_ , a_ , a_ = config_and_inputs a_ = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : Union[str, Any] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () a_ : List[Any] = ( {"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification} if is_tf_available() else {} ) a_ : Any = False a_ : Dict = False a_ : Optional[int] = False a_ : List[Any] = False a_ : List[Any] = False def UpperCAmelCase__ ( self) ->List[str]: a_ = TFCvtModelTester(self) a_ = TFCvtConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37) def UpperCAmelCase__ ( self) ->List[str]: self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason="Cvt does not output attentions") def UpperCAmelCase__ ( self) ->Dict: pass @unittest.skip(reason="Cvt does not use inputs_embeds") def UpperCAmelCase__ ( self) ->List[str]: pass @unittest.skip(reason="Cvt does not support input and output embeddings") def UpperCAmelCase__ ( self) ->Optional[Any]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) def UpperCAmelCase__ ( self) ->Dict: super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." 
, ) @slow def UpperCAmelCase__ ( self) ->List[str]: super().test_keras_fit() @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8") def UpperCAmelCase__ ( self) ->Dict: a_ = tf.keras.mixed_precision.Policy("mixed_float16") tf.keras.mixed_precision.set_global_policy(__UpperCAmelCase) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy("float32") def UpperCAmelCase__ ( self) ->Optional[int]: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = model_class(__UpperCAmelCase) a_ = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic a_ = [*signature.parameters.keys()] a_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCAmelCase) def UpperCAmelCase__ ( self) ->Optional[int]: def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase): a_ = model_class(__UpperCAmelCase) a_ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase)) a_ = outputs.hidden_states a_ = len(self.model_tester.depth) self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:]) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a_ = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) def UpperCAmelCase__ ( self) ->Dict: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->List[str]: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase) @slow def UpperCAmelCase__ ( self) ->str: for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ = TFCvtModel.from_pretrained(__UpperCAmelCase) self.assertIsNotNone(__UpperCAmelCase) def UpperCamelCase ( ) ->Dict: """simple docstring""" a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class snake_case ( unittest.TestCase ): @cached_property def UpperCAmelCase__ ( self) ->int: return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) @slow def UpperCAmelCase__ ( self) ->Any: a_ = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) a_ = self.default_image_processor a_ = prepare_img() a_ = image_processor(images=__UpperCAmelCase , return_tensors="tf") # forward pass a_ = model(**__UpperCAmelCase) # verify the logits a_ = tf.TensorShape((1, 10_00)) self.assertEqual(outputs.logits.shape , __UpperCAmelCase) a_ = tf.constant([0.9_285, 0.9_015, -0.3_150]) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __UpperCAmelCase , atol=1E-4))
303
1
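A quick sanity-check sketch for the RetriBERT-style config class at the top of this row, assuming it is importable under its dump name `snake_case`; the asserted values are simply the signature defaults.

cfg = snake_case()  # class name kept as it appears in the dump
assert cfg.model_type == "retribert"
assert (cfg.num_hidden_layers, cfg.num_attention_heads, cfg.projection_dim) == (8, 12, 128)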
"""simple docstring""" import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} UpperCamelCase_ = { 'vocab_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } UpperCamelCase_ = { 'vocab_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } UpperCamelCase_ = { 'vocab_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json' ), }, } UpperCamelCase_ = { 'facebook/dpr-ctx_encoder-single-nq-base': 512, 'facebook/dpr-ctx_encoder-multiset-base': 512, } UpperCamelCase_ = { 'facebook/dpr-question_encoder-single-nq-base': 512, 'facebook/dpr-question_encoder-multiset-base': 512, } UpperCamelCase_ = { 'facebook/dpr-reader-single-nq-base': 512, 'facebook/dpr-reader-multiset-base': 512, } UpperCamelCase_ = { 'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True}, } UpperCamelCase_ = { 'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True}, } UpperCamelCase_ = { 'facebook/dpr-reader-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-reader-multiset-base': {'do_lower_case': True}, } class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : str = VOCAB_FILES_NAMES a_ : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP a_ : List[Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ : str = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : List[str] = VOCAB_FILES_NAMES a_ : Optional[Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP a_ : str = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ : 
Union[str, Any] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text'] ) UpperCamelCase_ = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) UpperCamelCase_ = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. 
If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n ' @add_start_docstrings(SCREAMING_SNAKE_CASE_ ) class snake_case : def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ) ->BatchEncoding: if titles is None and texts is None: return super().__call__( __UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , **__UpperCAmelCase , ) elif titles is None or texts is None: a_ = titles if texts is None else texts return super().__call__( __UpperCAmelCase , __UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , **__UpperCAmelCase , ) a_ = titles if not isinstance(__UpperCAmelCase , __UpperCAmelCase) else [titles] a_ = texts if not isinstance(__UpperCAmelCase , __UpperCAmelCase) else [texts] a_ = len(__UpperCAmelCase) a_ = questions if not isinstance(__UpperCAmelCase , __UpperCAmelCase) else [questions] * n_passages if len(__UpperCAmelCase) != len(__UpperCAmelCase): raise ValueError( F'''There should be as many titles than texts but got {len(__UpperCAmelCase)} titles and {len(__UpperCAmelCase)} texts.''') a_ = super().__call__(__UpperCAmelCase , __UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase)["input_ids"] a_ = super().__call__(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase)["input_ids"] a_ = { "input_ids": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(__UpperCAmelCase , __UpperCAmelCase) ] } if return_attention_mask is not False: a_ = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids]) a_ = attention_mask return self.pad(__UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors=__UpperCAmelCase) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 16 , __UpperCAmelCase = 64 , __UpperCAmelCase = 4 , ) ->List[DPRSpanPrediction]: a_ = reader_input["input_ids"] a_ , a_ , a_ = reader_output[:3] a_ = len(__UpperCAmelCase) a_ = sorted(range(__UpperCAmelCase) , reverse=__UpperCAmelCase , 
key=relevance_logits.__getitem__) a_ = [] for doc_id in sorted_docs: a_ = list(input_ids[doc_id]) # assuming question & title information is at the beginning of the sequence a_ = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: a_ = sequence_ids.index(self.pad_token_id) else: a_ = len(__UpperCAmelCase) a_ = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__UpperCAmelCase , top_spans=__UpperCAmelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__UpperCAmelCase , start_index=__UpperCAmelCase , end_index=__UpperCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1]) , )) if len(__UpperCAmelCase) >= num_spans: break return nbest_spans_predictions[:num_spans] def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->List[DPRSpanPrediction]: a_ = [] for start_index, start_score in enumerate(__UpperCAmelCase): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]): scores.append(((start_index, start_index + answer_length), start_score + end_score)) a_ = sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase: x[1] , reverse=__UpperCAmelCase) a_ = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''') a_ = end_index - start_index + 1 if length > max_answer_length: raise ValueError(F'''Span is too long: {length} > {max_answer_length}''') if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals): continue chosen_span_intervals.append((start_index, end_index)) if len(__UpperCAmelCase) == top_spans: break return chosen_span_intervals @add_end_docstrings(SCREAMING_SNAKE_CASE_ ) class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): a_ : Any = VOCAB_FILES_NAMES a_ : List[str] = READER_PRETRAINED_VOCAB_FILES_MAP a_ : Dict = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ : Union[str, Any] = READER_PRETRAINED_INIT_CONFIGURATION a_ : Optional[Any] = ["""input_ids""", """attention_mask"""]
303
"""simple docstring""" import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : Dict = """Speech2TextFeatureExtractor""" a_ : str = """Speech2TextTokenizer""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase) ->List[str]: super().__init__(__UpperCAmelCase , __UpperCAmelCase) a_ = self.feature_extractor a_ = False def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->Optional[int]: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.") a_ = kwargs.pop("raw_speech") else: a_ = kwargs.pop("audio" , __UpperCAmelCase) a_ = kwargs.pop("sampling_rate" , __UpperCAmelCase) a_ = kwargs.pop("text" , __UpperCAmelCase) if len(__UpperCAmelCase) > 0: a_ = args[0] a_ = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process.") if audio is not None: a_ = self.feature_extractor(__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase) if text is not None: a_ = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase) if text is None: return inputs elif audio is None: return encodings else: a_ = encodings["input_ids"] return inputs def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->str: return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase) def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->int: return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase) @contextmanager def UpperCAmelCase__ ( self) ->Tuple: warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call.") a_ = True a_ = self.tokenizer yield a_ = self.feature_extractor a_ = False
303
1
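The reader tokenizer in this row ranks candidate answer spans and keeps the best non-overlapping ones. Below is a self-contained sketch of that idea with toy logits; the function name and all numbers are assumptions for illustration, not the transformers implementation.

# Score every (start, end) pair within a length window, sort by score, then
# greedily keep the top spans that do not overlap previously chosen ones.
def best_spans(start_logits, end_logits, max_answer_length=3, top_spans=2):
    scores = []
    for start, s_score in enumerate(start_logits):
        for length, e_score in enumerate(end_logits[start : start + max_answer_length]):
            scores.append(((start, start + length), s_score + e_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (start, end), _ in scores:
        if any(not (end < s or e < start) for s, e in chosen):
            continue  # overlaps an already-selected span
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen


print(best_spans([0.1, 2.0, 0.3, 0.0], [0.0, 0.2, 1.5, 0.1]))  # -> [(1, 2), (0, 0)]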
"""simple docstring""" UpperCamelCase_ = frozenset( [ 'prompt', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', 'cross_attention_kwargs', ] ) UpperCamelCase_ = frozenset(['prompt', 'negative_prompt']) UpperCamelCase_ = frozenset([]) UpperCamelCase_ = frozenset(['image']) UpperCamelCase_ = frozenset( [ 'image', 'height', 'width', 'guidance_scale', ] ) UpperCamelCase_ = frozenset(['image']) UpperCamelCase_ = frozenset( [ 'prompt', 'image', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', ] ) UpperCamelCase_ = frozenset(['prompt', 'image', 'negative_prompt']) UpperCamelCase_ = frozenset( [ # Text guided image variation with an image mask 'prompt', 'image', 'mask_image', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', ] ) UpperCamelCase_ = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt']) UpperCamelCase_ = frozenset( [ # image variation with an image mask 'image', 'mask_image', 'height', 'width', 'guidance_scale', ] ) UpperCamelCase_ = frozenset(['image', 'mask_image']) UpperCamelCase_ = frozenset( [ 'example_image', 'image', 'mask_image', 'height', 'width', 'guidance_scale', ] ) UpperCamelCase_ = frozenset(['example_image', 'image', 'mask_image']) UpperCamelCase_ = frozenset(['class_labels']) UpperCamelCase_ = frozenset(['class_labels']) UpperCamelCase_ = frozenset(['batch_size']) UpperCamelCase_ = frozenset([]) UpperCamelCase_ = frozenset(['batch_size']) UpperCamelCase_ = frozenset([]) UpperCamelCase_ = frozenset( [ 'prompt', 'audio_length_in_s', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', 'cross_attention_kwargs', ] ) UpperCamelCase_ = frozenset(['prompt', 'negative_prompt']) UpperCamelCase_ = frozenset(['input_tokens']) UpperCamelCase_ = frozenset(['input_tokens'])
303
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase_ = { 'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ 'LILT_PRETRAINED_MODEL_ARCHIVE_LIST', 'LiltForQuestionAnswering', 'LiltForSequenceClassification', 'LiltForTokenClassification', 'LiltModel', 'LiltPreTrainedModel', ] if TYPE_CHECKING: from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lilt import ( LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, LiltPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
303
1
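Both `__init__` files in this row rely on `_LazyModule` so that importing the package stays cheap and torch-heavy submodules load only on first attribute access. A minimal generic sketch of that lazy-import pattern follows; this is an assumption-laden illustration, not the transformers implementation.

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map exported symbol -> submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        # importing happens here, on first access, not at package import time
        try:
            module_name = self._symbol_to_module[symbol]
        except KeyError:
            raise AttributeError(symbol)
        module = importlib.import_module("." + module_name, self.__name__)
        return getattr(module, symbol)

# Hypothetical usage inside a package __init__:
# sys.modules[__name__] = LazyModule(__name__, {"configuration_lilt": ["LiltConfig"]})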
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase_ = { 'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ['AlbertTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ['AlbertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ 'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'AlbertForMaskedLM', 'AlbertForMultipleChoice', 'AlbertForPreTraining', 'AlbertForQuestionAnswering', 'AlbertForSequenceClassification', 'AlbertForTokenClassification', 'AlbertModel', 'AlbertPreTrainedModel', 'load_tf_weights_in_albert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ 'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFAlbertForMaskedLM', 'TFAlbertForMultipleChoice', 'TFAlbertForPreTraining', 'TFAlbertForQuestionAnswering', 'TFAlbertForSequenceClassification', 'TFAlbertForTokenClassification', 'TFAlbertMainLayer', 'TFAlbertModel', 'TFAlbertPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ 'FlaxAlbertForMaskedLM', 'FlaxAlbertForMultipleChoice', 'FlaxAlbertForPreTraining', 'FlaxAlbertForQuestionAnswering', 'FlaxAlbertForSequenceClassification', 'FlaxAlbertForTokenClassification', 'FlaxAlbertModel', 'FlaxAlbertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert import AlbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert_fast import AlbertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_albert import ( ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, AlbertPreTrainedModel, load_tf_weights_in_albert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, TFAlbertModel, TFAlbertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, 
FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, FlaxAlbertPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
303
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase_ = { 'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'], 'feature_extraction_mctct': ['MCTCTFeatureExtractor'], 'processing_mctct': ['MCTCTProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ 'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST', 'MCTCTForCTC', 'MCTCTModel', 'MCTCTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
303
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'} class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : Tuple = """ctrl""" a_ : Tuple = ["""past_key_values"""] a_ : List[str] = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , __UpperCAmelCase=24_65_34 , __UpperCAmelCase=2_56 , __UpperCAmelCase=12_80 , __UpperCAmelCase=81_92 , __UpperCAmelCase=48 , __UpperCAmelCase=16 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1E-6 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , **__UpperCAmelCase , ) ->Tuple: a_ = vocab_size a_ = n_positions a_ = n_embd a_ = n_layer a_ = n_head a_ = dff a_ = resid_pdrop a_ = embd_pdrop a_ = layer_norm_epsilon a_ = initializer_range a_ = use_cache super().__init__(**__UpperCAmelCase)
303
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) UpperCamelCase_ = { 'configuration_swiftformer': [ 'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwiftFormerConfig', 'SwiftFormerOnnxConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ 'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'SwiftFormerForImageClassification', 'SwiftFormerModel', 'SwiftFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
303
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { 'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json', } class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): a_ : Optional[Any] = """focalnet""" def __init__( self , __UpperCAmelCase=2_24 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=96 , __UpperCAmelCase=False , __UpperCAmelCase=[1_92, 3_84, 7_68, 7_68] , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[2, 2, 2, 2] , __UpperCAmelCase=[3, 3, 3, 3] , __UpperCAmelCase="gelu" , __UpperCAmelCase=4.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=False , __UpperCAmelCase=1E-4 , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ) ->Tuple: super().__init__(**__UpperCAmelCase) a_ = image_size a_ = patch_size a_ = num_channels a_ = embed_dim a_ = use_conv_embed a_ = hidden_sizes a_ = depths a_ = focal_levels a_ = focal_windows a_ = hidden_act a_ = mlp_ratio a_ = hidden_dropout_prob a_ = drop_path_rate a_ = use_layerscale a_ = layerscale_value a_ = use_post_layernorm a_ = use_post_layernorm_in_modulation a_ = normalize_modulator a_ = initializer_range a_ = layer_norm_eps a_ = encoder_stride a_ = ["stem"] + [F'''stage{idx}''' for idx in range(1 , len(self.depths) + 1)] a_ , a_ = get_aligned_output_features_output_indices( out_features=__UpperCAmelCase , out_indices=__UpperCAmelCase , stage_names=self.stage_names)
303
"""simple docstring""" # Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]: """simple docstring""" a_ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, nicht wahr?", } # BLUE scores as follows: # "pair": [fairseq, transformers] a_ = { "wmt16-en-de-dist-12-1": [28.3, 27.52], "wmt16-en-de-dist-6-1": [27.4, 27.11], "wmt16-en-de-12-1": [26.9, 25.75], } a_ = F'''{src_lang}-{tgt_lang}''' a_ = F''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt16 - allenai license: apache-2.0 datasets: - wmt16 metrics: - bleu --- # FSMT ## Model description This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}. For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369). All 3 models are available: * [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1) * [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1) * [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "allenai/{model_name}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias ## Training data Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369). ## Eval results Here are the BLEU scores: model | fairseq | transformers -------|---------|---------- {model_name} | {scores[model_name][0]} | {scores[model_name][1]} The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs. The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=5 mkdir -p $DATA_DIR sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` ## Data Sources - [training, etc.](http://www.statmt.org/wmt16/) - [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372) ### BibTeX entry and citation info ``` @misc{{kasai2020deep, title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}}, author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. 
Smith}}, year={{2020}}, eprint={{2006.10369}}, archivePrefix={{arXiv}}, primaryClass={{cs.CL}} }} ``` ''' model_card_dir.mkdir(parents=UpperCAmelCase , exist_ok=UpperCAmelCase ) a_ = os.path.join(UpperCAmelCase , "README.md" ) print(F'''Generating {path}''' ) with open(UpperCAmelCase , "w" , encoding="utf-8" ) as f: f.write(UpperCAmelCase ) # make sure we are under the root of the project UpperCamelCase_ = Path(__file__).resolve().parent.parent.parent UpperCamelCase_ = repo_dir / 'model_cards' for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: UpperCamelCase_ = model_cards_dir / 'allenai' / model_name write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
303
1
"""simple docstring""" def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 0 , UpperCAmelCase = 0 ) ->int: """simple docstring""" a_ = right or len(UpperCAmelCase ) - 1 if left > right: return -1 elif list_data[left] == key: return left elif list_data[right] == key: return right else: return search(UpperCAmelCase , UpperCAmelCase , left + 1 , right - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
303
"""simple docstring""" def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int: """simple docstring""" if n == 0: return 1 elif n % 2 == 1: return (binary_exponentiation(UpperCAmelCase , n - 1 , UpperCAmelCase ) * a) % mod else: a_ = binary_exponentiation(UpperCAmelCase , n / 2 , UpperCAmelCase ) return (b * b) % mod # a prime number UpperCamelCase_ = 701 UpperCamelCase_ = 1000000000 UpperCamelCase_ = 10 # using binary exponentiation function, O(log(p)): print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p) print((a / b) % p == (a * b ** (p - 2)) % p)
303
1
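A worked example for the modular-arithmetic row above: by Fermat's little theorem, b**(p - 2) % p is the modular inverse of b for prime p, which is exactly what binary_exponentiation(b, p - 2, p) computes in O(log p) multiplications. The small values below are assumptions for illustration.

p = 13  # a small prime (illustrative)
b = 5
inv = pow(b, p - 2, p)     # built-in three-argument pow; equals binary_exponentiation(b, p - 2, p)
assert inv == 8
assert (b * inv) % p == 1  # 5 * 8 = 40 = 3 * 13 + 1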
"""simple docstring""" def UpperCamelCase ( UpperCAmelCase ) ->list[int]: """simple docstring""" if length <= 0 or not isinstance(UpperCAmelCase , UpperCAmelCase ): raise ValueError("Length must be a positive integer." ) return [n * (2 * n - 1) for n in range(UpperCAmelCase )] if __name__ == "__main__": print(hexagonal_numbers(length=5)) print(hexagonal_numbers(length=10))
303
"""simple docstring""" import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor UpperCamelCase_ = logging.get_logger(__name__) class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->None: warnings.warn( "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use YolosImageProcessor instead." , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase)
303
1
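A quick check of the hexagonal-number snippet above; because the range starts at 0, the returned list leads with 0. The expected values follow directly from h(n) = n * (2n - 1).

assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]
assert all(h == n * (2 * n - 1) for n, h in enumerate(hexagonal_numbers(10)))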
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class snake_case ( unittest.TestCase ): a_ : Optional[int] = MODEL_FOR_CAUSAL_LM_MAPPING a_ : Tuple = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def UpperCAmelCase__ ( self) ->Optional[Any]: a_ = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt") # Using `do_sample=False` to force deterministic output a_ = text_generator("This is a test" , do_sample=__UpperCAmelCase) self.assertEqual( __UpperCAmelCase , [ { "generated_text": ( "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope." " oscope. FiliFili@@" ) } ] , ) a_ = text_generator(["This is a test", "This is a second test"]) self.assertEqual( __UpperCAmelCase , [ [ { "generated_text": ( "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope." " oscope. FiliFili@@" ) } ], [ { "generated_text": ( "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy" " oscope. oscope. FiliFili@@" ) } ], ] , ) a_ = text_generator("This is a test" , do_sample=__UpperCAmelCase , num_return_sequences=2 , return_tensors=__UpperCAmelCase) self.assertEqual( __UpperCAmelCase , [ {"generated_token_ids": ANY(__UpperCAmelCase)}, {"generated_token_ids": ANY(__UpperCAmelCase)}, ] , ) a_ = text_generator.model.config.eos_token_id a_ = "<pad>" a_ = text_generator( ["This is a test", "This is a second test"] , do_sample=__UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCAmelCase , ) self.assertEqual( __UpperCAmelCase , [ [ {"generated_token_ids": ANY(__UpperCAmelCase)}, {"generated_token_ids": ANY(__UpperCAmelCase)}, ], [ {"generated_token_ids": ANY(__UpperCAmelCase)}, {"generated_token_ids": ANY(__UpperCAmelCase)}, ], ] , ) @require_tf def UpperCAmelCase__ ( self) ->Any: a_ = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf") # Using `do_sample=False` to force deterministic output a_ = text_generator("This is a test" , do_sample=__UpperCAmelCase) self.assertEqual( __UpperCAmelCase , [ { "generated_text": ( "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵" " please," ) } ] , ) a_ = text_generator(["This is a test", "This is a second test"] , do_sample=__UpperCAmelCase) self.assertEqual( __UpperCAmelCase , [ [ { "generated_text": ( "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵" " please," ) } ], [ { "generated_text": ( "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes" " Cannes 閲閲Cannes Cannes Cannes 攵 please," ) } ], ] , ) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[int]: a_ = TextGenerationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase) return text_generator, ["This is a test", "Another test"] def UpperCAmelCase__ ( self) ->Dict: a_ = "Hello I believe in" a_ = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2") a_ = text_generator(__UpperCAmelCase) self.assertEqual( __UpperCAmelCase , [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}] , ) 
a_ = text_generator(__UpperCAmelCase , stop_sequence=" fe") self.assertEqual(__UpperCAmelCase , [{"generated_text": "Hello I believe in fe"}]) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->Dict: a_ = text_generator.model a_ = text_generator.tokenizer a_ = text_generator("This is a test") self.assertEqual(__UpperCAmelCase , [{"generated_text": ANY(__UpperCAmelCase)}]) self.assertTrue(outputs[0]["generated_text"].startswith("This is a test")) a_ = text_generator("This is a test" , return_full_text=__UpperCAmelCase) self.assertEqual(__UpperCAmelCase , [{"generated_text": ANY(__UpperCAmelCase)}]) self.assertNotIn("This is a test" , outputs[0]["generated_text"]) a_ = pipeline(task="text-generation" , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , return_full_text=__UpperCAmelCase) a_ = text_generator("This is a test") self.assertEqual(__UpperCAmelCase , [{"generated_text": ANY(__UpperCAmelCase)}]) self.assertNotIn("This is a test" , outputs[0]["generated_text"]) a_ = text_generator("This is a test" , return_full_text=__UpperCAmelCase) self.assertEqual(__UpperCAmelCase , [{"generated_text": ANY(__UpperCAmelCase)}]) self.assertTrue(outputs[0]["generated_text"].startswith("This is a test")) a_ = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=__UpperCAmelCase) self.assertEqual( __UpperCAmelCase , [ [{"generated_text": ANY(__UpperCAmelCase)}, {"generated_text": ANY(__UpperCAmelCase)}], [{"generated_text": ANY(__UpperCAmelCase)}, {"generated_text": ANY(__UpperCAmelCase)}], ] , ) if text_generator.tokenizer.pad_token is not None: a_ = text_generator( ["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCAmelCase) self.assertEqual( __UpperCAmelCase , [ [{"generated_text": ANY(__UpperCAmelCase)}, {"generated_text": ANY(__UpperCAmelCase)}], [{"generated_text": ANY(__UpperCAmelCase)}, {"generated_text": ANY(__UpperCAmelCase)}], ] , ) with self.assertRaises(__UpperCAmelCase): a_ = text_generator("test" , return_full_text=__UpperCAmelCase , return_text=__UpperCAmelCase) with self.assertRaises(__UpperCAmelCase): a_ = text_generator("test" , return_full_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase) with self.assertRaises(__UpperCAmelCase): a_ = text_generator("test" , return_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): a_ = text_generator("") self.assertEqual(__UpperCAmelCase , [{"generated_text": ANY(__UpperCAmelCase)}]) else: with self.assertRaises((ValueError, AssertionError)): a_ = text_generator("") if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. 
a_ = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"] if ( tokenizer.model_max_length < 1_00_00 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)): text_generator("This is a test" * 5_00 , max_new_tokens=20) a_ = text_generator("This is a test" * 5_00 , handle_long_generation="hole" , max_new_tokens=20) # Hole strategy cannot work with self.assertRaises(__UpperCAmelCase): text_generator( "This is a test" * 5_00 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def UpperCAmelCase__ ( self) ->str: import torch # Classic `model_kwargs` a_ = pipeline( model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0)) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa) a_ = pipe("This is a test") self.assertEqual( __UpperCAmelCase , [ { "generated_text": ( "This is a test test test test test test test test test test test test test test test test" " test" ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) a_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloataa) self.assertEqual(pipe.model.device , torch.device(0)) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa) a_ = pipe("This is a test") self.assertEqual( __UpperCAmelCase , [ { "generated_text": ( "This is a test test test test test test test test test test test test test test test test" " test" ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 a_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto") self.assertEqual(pipe.model.device , torch.device(0)) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa) a_ = pipe("This is a test") self.assertEqual( __UpperCAmelCase , [ { "generated_text": ( "This is a test test test test test test test test test test test test test test test test" " test" ) } ] , ) @require_torch @require_torch_gpu def UpperCAmelCase__ ( self) ->Dict: import torch a_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.floataa) pipe("This is a test") @require_torch @require_accelerate @require_torch_gpu def UpperCAmelCase__ ( self) ->Optional[int]: import torch a_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.floataa) pipe("This is a test" , do_sample=__UpperCAmelCase , top_p=0.5) def UpperCAmelCase__ ( self) ->Union[str, Any]: a_ = "Hello world" a_ = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2") if text_generator.model.framework == "tf": a_ = logging.get_logger("transformers.generation.tf_utils") else: a_ = logging.get_logger("transformers.generation.utils") a_ = "Both `max_new_tokens`" # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(__UpperCAmelCase) as cl: a_ = text_generator(__UpperCAmelCase , max_length=10 , max_new_tokens=1) self.assertIn(__UpperCAmelCase , cl.out) # The user only sets one -> no warning with CaptureLogger(__UpperCAmelCase) as cl: a_ = text_generator(__UpperCAmelCase , 
max_new_tokens=1) self.assertNotIn(__UpperCAmelCase , cl.out) with CaptureLogger(__UpperCAmelCase) as cl: a_ = text_generator(__UpperCAmelCase , max_length=10) self.assertNotIn(__UpperCAmelCase , cl.out)
303
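# A minimal sketch of the `text-generation` pipeline behaviour exercised by the tests above,
# assuming only the public `transformers` API; the tiny checkpoint is the one the tests use
# and the generated continuation is illustrative.
from transformers import pipeline

generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
# return_full_text=False strips the prompt from the returned string; max_new_tokens bounds
# the continuation length independently of the prompt length.
outputs = generator("This is a test", max_new_tokens=5, return_full_text=False)
print(outputs)  # [{'generated_text': '...'}]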
"""simple docstring""" import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self) ->Dict: a_ = inspect.getfile(accelerate.test_utils) a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"]) a_ = os.path.sep.join( mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]) a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"]) @require_multi_gpu def UpperCAmelCase__ ( self) ->Any: print(F'''Found {torch.cuda.device_count()} devices.''') a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path] with patch_environment(omp_num_threads=1): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy()) @require_multi_gpu def UpperCAmelCase__ ( self) ->str: print(F'''Found {torch.cuda.device_count()} devices.''') a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path] print(F'''Command: {cmd}''') with patch_environment(omp_num_threads=1): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy()) @require_multi_gpu def UpperCAmelCase__ ( self) ->Optional[int]: a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)] with patch_environment(omp_num_threads=1): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy()) @require_multi_gpu def UpperCAmelCase__ ( self) ->List[Any]: print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''') a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1"): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy()) if __name__ == "__main__": UpperCamelCase_ = Accelerator() UpperCamelCase_ = (accelerator.state.process_index + 2, 10) UpperCamelCase_ = torch.randint(0, 10, shape).to(accelerator.device) UpperCamelCase_ = '' UpperCamelCase_ = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." UpperCamelCase_ = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." UpperCamelCase_ = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
303
1
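# A minimal sketch of `Accelerator.pad_across_processes`, the call the script above verifies:
# tensors whose first dimension differs per process are zero-padded to the maximum length.
# This only does anything useful under a multi-process launch (e.g. `torchrun` or
# `accelerate launch`); the shapes are illustrative.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
tensor = torch.ones(accelerator.process_index + 1, 2, device=accelerator.device)
padded = accelerator.pad_across_processes(tensor)                        # pads dim 0 with zeros
padded_first = accelerator.pad_across_processes(tensor, pad_first=True)  # zeros go in front
print(padded.shape)  # dim 0 equals the largest per-process length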
"""simple docstring""" from ..utils import DummyObject, requires_backends class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ): a_ : List[Any] = ["""transformers""", """torch""", """note_seq"""] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->str: requires_backends(self , ["transformers", "torch", "note_seq"]) @classmethod def UpperCAmelCase__ ( cls , *__UpperCAmelCase , **__UpperCAmelCase) ->Optional[Any]: requires_backends(cls , ["transformers", "torch", "note_seq"]) @classmethod def UpperCAmelCase__ ( cls , *__UpperCAmelCase , **__UpperCAmelCase) ->str: requires_backends(cls , ["transformers", "torch", "note_seq"])
303
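# A minimal sketch of the dummy-object pattern above, assuming `DummyObject` and
# `requires_backends` are importable from `diffusers.utils` (mirroring the relative import
# in the file); the class name below is hypothetical. Instantiating the placeholder raises
# an informative ImportError whenever the optional backends are missing.
from diffusers.utils import DummyObject, requires_backends


class NoteSeqPlaceholder(metaclass=DummyObject):  # hypothetical name
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])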
"""simple docstring""" from heapq import heappop, heappush import numpy as np def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) ->tuple[float | int, list[tuple[int, int]]]: """simple docstring""" a_ , a_ = grid.shape a_ = [-1, 1, 0, 0] a_ = [0, 0, -1, 1] if allow_diagonal: dx += [-1, -1, 1, 1] dy += [-1, 1, -1, 1] a_ , a_ = [(0, source)], set() a_ = np.full((rows, cols) , np.inf ) a_ = 0 a_ = np.empty((rows, cols) , dtype=UpperCAmelCase ) a_ = None while queue: ((a_) , (a_)) = heappop(UpperCAmelCase ) if (x, y) in visited: continue visited.add((x, y) ) if (x, y) == destination: a_ = [] while (x, y) != source: path.append((x, y) ) a_ , a_ = predecessors[x, y] path.append(UpperCAmelCase ) # add the source manually path.reverse() return matrix[destination], path for i in range(len(UpperCAmelCase ) ): a_ , a_ = x + dx[i], y + dy[i] if 0 <= nx < rows and 0 <= ny < cols: a_ = grid[nx][ny] if next_node == 1 and matrix[nx, ny] > dist + 1: heappush(UpperCAmelCase , (dist + 1, (nx, ny)) ) a_ = dist + 1 a_ = (x, y) return np.inf, [] if __name__ == "__main__": import doctest doctest.testmod()
303
1
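# A self-contained cross-check for the grid routine above: with unit edge weights, a plain
# BFS already yields shortest path lengths, so Dijkstra and BFS must agree. Grid values and
# endpoints are illustrative (1 marks a traversable cell).
from collections import deque


def bfs_shortest_distance(grid, source, destination):
    rows, cols = len(grid), len(grid[0])
    queue, seen = deque([(source, 0)]), {source}
    while queue:
        (x, y), dist = queue.popleft()
        if (x, y) == destination:
            return dist
        for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            nx, ny = x + dx, y + dy
            if 0 <= nx < rows and 0 <= ny < cols and grid[nx][ny] == 1 and (nx, ny) not in seen:
                seen.add((nx, ny))
                queue.append(((nx, ny), dist + 1))
    return float("inf")


grid = [[1, 1, 1], [0, 1, 0], [1, 1, 1]]
print(bfs_shortest_distance(grid, (0, 0), (2, 2)))  # 4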
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCamelCase_ = { 'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'], 'configuration_data2vec_text': [ 'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecTextConfig', 'Data2VecTextOnnxConfig', ], 'configuration_data2vec_vision': [ 'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecVisionConfig', 'Data2VecVisionOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ 'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecAudioForAudioFrameClassification', 'Data2VecAudioForCTC', 'Data2VecAudioForSequenceClassification', 'Data2VecAudioForXVector', 'Data2VecAudioModel', 'Data2VecAudioPreTrainedModel', ] UpperCamelCase_ = [ 'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecTextForCausalLM', 'Data2VecTextForMaskedLM', 'Data2VecTextForMultipleChoice', 'Data2VecTextForQuestionAnswering', 'Data2VecTextForSequenceClassification', 'Data2VecTextForTokenClassification', 'Data2VecTextModel', 'Data2VecTextPreTrainedModel', ] UpperCamelCase_ = [ 'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecVisionForImageClassification', 'Data2VecVisionForMaskedImageModeling', 'Data2VecVisionForSemanticSegmentation', 'Data2VecVisionModel', 'Data2VecVisionPreTrainedModel', ] if is_tf_available(): UpperCamelCase_ = [ 'TFData2VecVisionForImageClassification', 'TFData2VecVisionForSemanticSegmentation', 'TFData2VecVisionModel', 'TFData2VecVisionPreTrainedModel', ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
303
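# A minimal sketch of what the `_LazyModule` wiring above buys: importing the package is
# cheap, and the heavy submodule is only imported when an attribute is first resolved.
# Assumes `transformers` with the torch backend installed.
import transformers

config = transformers.Data2VecTextConfig()  # resolved lazily on first access
print(config.model_type)  # "data2vec-text"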
"""simple docstring""" import numpy as np import torch from imwatermark import WatermarkEncoder # Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66 UpperCamelCase_ = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10 # bin(x)[2:] gives bits of x as str, use int to convert them to 0/1 UpperCamelCase_ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]] class snake_case : def __init__( self) ->Optional[int]: a_ = WATERMARK_BITS a_ = WatermarkEncoder() self.encoder.set_watermark("bits" , self.watermark) def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[int]: # can't encode images that are smaller than 256 if images.shape[-1] < 2_56: return images a_ = (2_55 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1).float().numpy() a_ = [self.encoder.encode(__UpperCAmelCase , "dwtDct") for image in images] a_ = torch.from_numpy(np.array(__UpperCAmelCase)).permute(0 , 3 , 1 , 2) a_ = torch.clamp(2 * (images / 2_55 - 0.5) , min=-1.0 , max=1.0) return images
303
1
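# A minimal sketch of the tensor round-trip the watermark embedder above performs, assuming
# a batch of decoded images in [-1, 1] (NCHW); inputs narrower than 256 px are returned
# unchanged by the class above, so 256 is used here.
import torch

images = torch.rand(1, 3, 256, 256) * 2 - 1                    # NCHW in [-1, 1]
hwc = (255 * (images / 2 + 0.5)).permute(0, 2, 3, 1).numpy()   # NHWC in [0, 255]
# ... the per-image `encoder.encode(image, "dwtDct")` call happens here in the class above ...
back = torch.from_numpy(hwc).permute(0, 3, 1, 2)
back = torch.clamp(2 * (back / 255 - 0.5), min=-1.0, max=1.0)  # back to [-1, 1]
print(back.shape)  # torch.Size([1, 3, 256, 256])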
"""simple docstring""" def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int: """simple docstring""" if n == 0: return 1 elif n % 2 == 1: return (binary_exponentiation(UpperCAmelCase , n - 1 , UpperCAmelCase ) * a) % mod else: a_ = binary_exponentiation(UpperCAmelCase , n / 2 , UpperCAmelCase ) return (b * b) % mod # a prime number UpperCamelCase_ = 701 UpperCamelCase_ = 1000000000 UpperCamelCase_ = 10 # using binary exponentiation function, O(log(p)): print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p) print((a / b) % p == (a * b ** (p - 2)) % p)
303
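# An equivalent iterative square-and-multiply sketch, which avoids the recursion depth of
# the function above; the check reuses the same prime as the script.
def binary_exponentiation_iterative(a: int, n: int, mod: int) -> int:
    result = 1
    a %= mod
    while n > 0:
        if n & 1:                 # low bit set: fold the current power in
            result = result * a % mod
        a = a * a % mod           # square the base
        n >>= 1
    return result


assert binary_exponentiation_iterative(10, 699, 701) == pow(10, 699, 701)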
"""simple docstring""" import math UpperCamelCase_ = 10 UpperCamelCase_ = 7 UpperCamelCase_ = BALLS_PER_COLOUR * NUM_COLOURS def UpperCamelCase ( UpperCAmelCase = 20 ) ->str: """simple docstring""" a_ = math.comb(UpperCAmelCase , UpperCAmelCase ) a_ = math.comb(NUM_BALLS - BALLS_PER_COLOUR , UpperCAmelCase ) a_ = NUM_COLOURS * (1 - missing_colour / total) return F'''{result:.9f}''' if __name__ == "__main__": print(solution(20))
303
1
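# A worked check of the linearity-of-expectation step above: with 7 colours of 10 balls
# each, a given colour is absent from the 20 drawn balls with probability
# C(60, 20) / C(70, 20), so the expectation is 7 * (1 - C(60, 20) / C(70, 20)).
import math

missing = math.comb(70 - 10, 20) / math.comb(70, 20)
print(f"{7 * (1 - missing):.9f}")  # 6.818741802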
"""simple docstring""" def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Optional[int]: """simple docstring""" a_ = [1] for i in range(2 , UpperCAmelCase ): factorials.append(factorials[-1] * i ) assert 0 <= k < factorials[-1] * n, "k out of bounds" a_ = [] a_ = list(range(UpperCAmelCase ) ) # Find permutation while factorials: a_ = factorials.pop() a_ , a_ = divmod(UpperCAmelCase , UpperCAmelCase ) permutation.append(elements[number] ) elements.remove(elements[number] ) permutation.append(elements[0] ) return permutation if __name__ == "__main__": import doctest doctest.testmod()
303
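# A quick cross-check for the factorial-number-system routine above: k indexes the
# permutations of range(n) in lexicographic order, which itertools can enumerate directly.
from itertools import permutations

assert [list(p) for p in permutations(range(3))][4] == [2, 0, 1]  # kth_permutation(4, 3)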
"""simple docstring""" import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params UpperCamelCase_ = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ['memory_attention', 'encoder_attn'], ['attention', 'attn'], ['/', '.'], ['.LayerNorm.gamma', '_layer_norm.weight'], ['.LayerNorm.beta', '_layer_norm.bias'], ['r.layer_', 'r.layers.'], ['output_proj', 'out_proj'], ['ffn.dense_1.', 'fc2.'], ['ffn.dense.', 'fc1.'], ['ffn_layer_norm', 'final_layer_norm'], ['kernel', 'weight'], ['encoder_layer_norm.', 'encoder.layer_norm.'], ['decoder_layer_norm.', 'decoder.layer_norm.'], ['embeddings.weights', 'shared.weight'], ] def UpperCamelCase ( UpperCAmelCase ) ->Optional[Any]: """simple docstring""" for pegasus_name, hf_name in PATTERNS: a_ = k.replace(UpperCAmelCase , UpperCAmelCase ) return k def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->PegasusForConditionalGeneration: """simple docstring""" a_ = DEFAULTS.copy() cfg_kwargs.update(UpperCAmelCase ) a_ = PegasusConfig(**UpperCAmelCase ) a_ = PegasusForConditionalGeneration(UpperCAmelCase ) a_ = torch_model.model.state_dict() a_ = {} for k, v in tf_weights.items(): a_ = rename_state_dict_key(UpperCAmelCase ) if new_k not in sd: raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' ) if "dense" in k or "proj" in new_k: a_ = v.T a_ = torch.tensor(UpperCAmelCase , dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}''' # make sure embedding.padding_idx is respected a_ = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1] ) a_ = mapping["shared.weight"] a_ = mapping["shared.weight"] a_ = {k: torch.zeros_like(UpperCAmelCase ) for k, v in sd.items() if k.endswith("bias" ) and k not in mapping} mapping.update(**UpperCAmelCase ) a_ , a_ = torch_model.model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase ) a_ = [ k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"] ] assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}''' assert extra == [], F'''no matches found for the following tf keys {extra}''' return torch_model def UpperCamelCase ( UpperCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) ->Dict: """simple docstring""" a_ = tf.train.list_variables(UpperCAmelCase ) a_ = {} a_ = ["Adafactor", "global_step"] for name, shape in tqdm(UpperCAmelCase , desc="converting tf checkpoint to dict" ): a_ = any(pat in name for pat in ignore_name ) if skip_key: continue a_ = tf.train.load_variable(UpperCAmelCase , UpperCAmelCase ) a_ = array return tf_weights def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" a_ = Path(UpperCAmelCase ).parent.name a_ = task_specific_params[F'''summarization_{dataset}''']["max_position_embeddings"] a_ = PegasusTokenizer.from_pretrained("sshleifer/pegasus" , model_max_length=UpperCAmelCase ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(UpperCAmelCase ) # convert model a_ = get_tf_weights_as_numpy(UpperCAmelCase ) a_ = task_specific_params[F'''summarization_{dataset}'''] if dataset == "large": a_ = task_specific_params a_ = convert_pegasus(UpperCAmelCase 
, UpperCAmelCase ) torch_model.save_pretrained(UpperCAmelCase ) a_ = torch_model.state_dict() sd.pop("model.decoder.embed_positions.weight" ) sd.pop("model.encoder.embed_positions.weight" ) torch.save(UpperCAmelCase , Path(UpperCAmelCase ) / "pytorch_model.bin" ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.') UpperCamelCase_ = parser.parse_args() if args.save_dir is None: UpperCamelCase_ = Path(args.tf_ckpt_path).parent.name UpperCamelCase_ = os.path.join('pegasus', dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
303
1
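# A minimal sketch of the ordered substring rewriting that `rename_state_dict_key` above
# applies; this pattern subset and the example key are illustrative. Order matters: the
# specific 'memory_attention' rule must fire before the generic 'attention' one.
PATTERNS = [
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["output_proj", "out_proj"],
    ["/", "."],
]


def rename_key(k: str) -> str:
    for old, new in PATTERNS:
        k = k.replace(old, new)
    return k


print(rename_key("decoder/memory_attention/output_proj"))  # decoder.encoder_attn.out_proj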
"""simple docstring""" import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class snake_case : def __init__( self , __UpperCAmelCase = "cpu" , __UpperCAmelCase = "openai/clip-vit-large-patch14") ->None: a_ = device a_ = CLIPTokenizerFast.from_pretrained(__UpperCAmelCase) a_ = [0.48_145_466, 0.4_578_275, 0.40_821_073] a_ = [0.26_862_954, 0.26_130_258, 0.27_577_711] a_ = torchvision.transforms.Normalize(self.image_mean , self.image_std) a_ = torchvision.transforms.Resize(2_24) a_ = torchvision.transforms.CenterCrop(2_24) def UpperCAmelCase__ ( self , __UpperCAmelCase) ->List[Any]: a_ = self.resize(__UpperCAmelCase) a_ = self.center_crop(__UpperCAmelCase) a_ = self.normalize(__UpperCAmelCase) return images def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase) ->Dict: a_ = self.tokenizer(text=__UpperCAmelCase , **__UpperCAmelCase) a_ = self.preprocess_img(__UpperCAmelCase) a_ = {key: value.to(self.device) for (key, value) in encoding.items()} return encoding class snake_case ( nn.Module ): def __init__( self , __UpperCAmelCase=10 , __UpperCAmelCase=0.01 , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase="image" , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , ) ->None: super().__init__() a_ = None a_ = device if device else get_device() if vqgan: a_ = vqgan else: a_ = load_vqgan(self.device , conf_path=__UpperCAmelCase , ckpt_path=__UpperCAmelCase) self.vqgan.eval() if clip: a_ = clip else: a_ = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") self.clip.to(self.device) a_ = ProcessorGradientFlow(device=self.device) a_ = iterations a_ = lr a_ = log a_ = make_grid a_ = return_val a_ = quantize a_ = self.vqgan.decoder.z_shape def UpperCAmelCase__ ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=5 , __UpperCAmelCase=True) ->Tuple: a_ = [] if output_path is None: a_ = "./animation.gif" if input_path is None: a_ = self.save_path a_ = sorted(glob(input_path + "/*")) if not len(__UpperCAmelCase): raise ValueError( "No images found in save path, aborting (did you pass save_intermediate=True to the generate" " function?)") if len(__UpperCAmelCase) == 1: print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)") a_ = total_duration / len(__UpperCAmelCase) a_ = [frame_duration] * len(__UpperCAmelCase) if extend_frames: a_ = 1.5 a_ = 3 for file_name in paths: if file_name.endswith(".png"): images.append(imageio.imread(__UpperCAmelCase)) imageio.mimsave(__UpperCAmelCase , __UpperCAmelCase , duration=__UpperCAmelCase) print(F'''gif saved to {output_path}''') def UpperCAmelCase__ ( self , __UpperCAmelCase=None , __UpperCAmelCase=None) ->Any: if not (path or img): raise ValueError("Input either path or tensor") if img is not None: raise NotImplementedError a_ = preprocess(Image.open(__UpperCAmelCase) , target_image_size=2_56).to(self.device) a_ = preprocess_vqgan(__UpperCAmelCase) a_ , *a_ = self.vqgan.encode(__UpperCAmelCase) return z def UpperCAmelCase__ ( self , 
__UpperCAmelCase) ->List[Any]: a_ = self.latent.detach().requires_grad_() a_ = base_latent + transform_vector if self.quantize: a_ , *a_ = self.vqgan.quantize(__UpperCAmelCase) else: a_ = trans_latent return self.vqgan.decode(__UpperCAmelCase) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None) ->Dict: a_ = self.clip_preprocessor(text=__UpperCAmelCase , images=__UpperCAmelCase , return_tensors="pt" , padding=__UpperCAmelCase) a_ = self.clip(**__UpperCAmelCase) a_ = clip_outputs.logits_per_image if weights is not None: a_ = similarity_logits * weights return similarity_logits.sum() def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Tuple: a_ = self._get_clip_similarity(pos_prompts["prompts"] , __UpperCAmelCase , weights=(1 / pos_prompts["weights"])) if neg_prompts: a_ = self._get_clip_similarity(neg_prompts["prompts"] , __UpperCAmelCase , weights=neg_prompts["weights"]) else: a_ = torch.tensor([1] , device=self.device) a_ = -torch.log(__UpperCAmelCase) + torch.log(__UpperCAmelCase) return loss def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->List[str]: a_ = torch.randn_like(self.latent , requires_grad=__UpperCAmelCase , device=self.device) a_ = torch.optim.Adam([vector] , lr=self.lr) for i in range(self.iterations): optim.zero_grad() a_ = self._add_vector(__UpperCAmelCase) a_ = loop_post_process(__UpperCAmelCase) a_ = self._get_CLIP_loss(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) print("CLIP loss" , __UpperCAmelCase) if self.log: wandb.log({"CLIP Loss": clip_loss}) clip_loss.backward(retain_graph=__UpperCAmelCase) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0]) else: yield vector def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[Any]: wandb.init(reinit=__UpperCAmelCase , project="face-editor") wandb.config.update({"Positive Prompts": positive_prompts}) wandb.config.update({"Negative Prompts": negative_prompts}) wandb.config.update({"lr": self.lr, "iterations": self.iterations}) if image_path: a_ = Image.open(__UpperCAmelCase) a_ = image.resize((2_56, 2_56)) wandb.log("Original Image" , wandb.Image(__UpperCAmelCase)) def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Any: if not prompts: return [] a_ = [] a_ = [] if isinstance(__UpperCAmelCase , __UpperCAmelCase): a_ = [prompt.strip() for prompt in prompts.split("|")] for prompt in prompts: if isinstance(__UpperCAmelCase , (tuple, list)): a_ = prompt[0] a_ = float(prompt[1]) elif ":" in prompt: a_ , a_ = prompt.split(":") a_ = float(__UpperCAmelCase) else: a_ = prompt a_ = 1.0 processed_prompts.append(__UpperCAmelCase) weights.append(__UpperCAmelCase) return { "prompts": processed_prompts, "weights": torch.tensor(__UpperCAmelCase , device=self.device), } def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=None , ) ->Optional[Any]: if image_path: a_ = self._get_latent(__UpperCAmelCase) else: a_ = torch.randn(self.latent_dim , device=self.device) if self.log: self._init_logging(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) assert pos_prompts, "You must provide at least one positive prompt." 
a_ = self.process_prompts(__UpperCAmelCase) a_ = self.process_prompts(__UpperCAmelCase) if save_final and save_path is None: a_ = os.path.join("./outputs/" , "_".join(pos_prompts["prompts"])) if not os.path.exists(__UpperCAmelCase): os.makedirs(__UpperCAmelCase) else: a_ = save_path + "_" + get_timestamp() os.makedirs(__UpperCAmelCase) a_ = save_path a_ = self.vqgan.decode(self.latent)[0] if show_intermediate: print("Original Image") show_pil(custom_to_pil(__UpperCAmelCase)) a_ = loop_post_process(__UpperCAmelCase) for iter, transformed_img in enumerate(self._optimize_CLIP(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)): if show_intermediate: show_pil(__UpperCAmelCase) if save_intermediate: transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}.png''')) if self.log: wandb.log({"Image": wandb.Image(__UpperCAmelCase)}) if show_final: show_pil(__UpperCAmelCase) if save_final: transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}_final.png'''))
303
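# A self-contained sketch of the "prompt:weight" parsing used by the editor class above
# ('|' separates prompts, weights default to 1.0); the example string is illustrative.
def parse_prompts(prompts):
    if isinstance(prompts, str):
        prompts = [p.strip() for p in prompts.split("|")]
    texts, weights = [], []
    for prompt in prompts:
        if ":" in prompt:
            text, weight = prompt.split(":")
            texts.append(text)
            weights.append(float(weight))
        else:
            texts.append(prompt)
            weights.append(1.0)
    return texts, weights


print(parse_prompts("a smiling face:1.5 | short hair"))
# (['a smiling face', 'short hair'], [1.5, 1.0])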
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class snake_case : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=50 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=None , ) ->Dict: a_ = parent a_ = batch_size a_ = seq_length a_ = is_training a_ = use_input_mask a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = initializer_range a_ = use_labels a_ = scope def UpperCAmelCase__ ( self) ->Any: a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a_ = None if self.use_input_mask: a_ = random_attention_mask([self.batch_size, self.seq_length]) if self.use_labels: a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a_ = self.get_config() return config, input_ids, input_mask, token_labels def UpperCAmelCase__ ( self) ->Optional[Any]: return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self) ->List[str]: ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) = self.prepare_config_and_inputs() a_ = True a_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->str: a_ = BertGenerationEncoder(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase) a_ = model(__UpperCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->Union[str, Any]: a_ = True a_ = BertGenerationEncoder(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , 
encoder_hidden_states=__UpperCAmelCase , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->List[str]: a_ = True a_ = True a_ = BertGenerationDecoder(config=__UpperCAmelCase).to(__UpperCAmelCase).eval() # first forward pass a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , ) a_ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids a_ = ids_tensor((self.batch_size, 3) , config.vocab_size) a_ = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and a_ = torch.cat([input_ids, next_tokens] , dim=-1) a_ = torch.cat([input_mask, next_mask] , dim=-1) a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0] a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0] # select random slice a_ = ids_tensor((1,) , output_from_past.shape[-1]).item() a_ = output_from_no_past[:, -3:, random_slice_idx].detach() a_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , ) ->Tuple: a_ = BertGenerationDecoder(__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCAmelCase__ ( self) ->str: a_ , a_ , a_ , a_ = self.prepare_config_and_inputs() a_ = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : List[str] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () a_ : Optional[int] = (BertGenerationDecoder,) if is_torch_available() else () a_ : List[Any] = ( {"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder} if is_torch_available() else {} ) def UpperCAmelCase__ ( self) ->List[Any]: a_ = BertGenerationEncoderTester(self) a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37) def UpperCAmelCase__ ( self) ->Optional[Any]: self.config_tester.run_common_tests() def UpperCAmelCase__ ( self) ->Tuple: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Tuple: a_ , a_ , a_ , a_ = self.model_tester.prepare_config_and_inputs() a_ = "bert" self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) def UpperCAmelCase__ ( self) ->int: a_ = 
self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->List[str]: a_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Optional[int]: # This regression test was failing with PyTorch < 1.3 ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() a_ = None self.model_tester.create_and_check_model_as_decoder( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) def UpperCAmelCase__ ( self) ->List[Any]: a_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase) @slow def UpperCAmelCase__ ( self) ->str: a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") self.assertIsNotNone(__UpperCAmelCase) @require_torch class snake_case ( unittest.TestCase ): @slow def UpperCAmelCase__ ( self) ->int: a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]]) with torch.no_grad(): a_ = model(__UpperCAmelCase)[0] a_ = torch.Size([1, 8, 10_24]) self.assertEqual(output.shape , __UpperCAmelCase) a_ = torch.tensor( [[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4)) @require_torch class snake_case ( unittest.TestCase ): @slow def UpperCAmelCase__ ( self) ->List[str]: a_ = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]]) with torch.no_grad(): a_ = model(__UpperCAmelCase)[0] a_ = torch.Size([1, 8, 5_03_58]) self.assertEqual(output.shape , __UpperCAmelCase) a_ = torch.tensor( [[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
303
1
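# A minimal inference sketch matching the slow integration test above; the checkpoint is the
# one the test loads (a large download) and the token ids are copied from the test.
import torch
from transformers import BertGenerationEncoder

model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
with torch.no_grad():
    hidden_states = model(input_ids)[0]
print(hidden_states.shape)  # torch.Size([1, 8, 1024])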
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { 'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json', # See all Nat models at https://huggingface.co/models?filter=nat } class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): a_ : Optional[int] = """nat""" a_ : Optional[Any] = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=64 , __UpperCAmelCase=[3, 4, 6, 5] , __UpperCAmelCase=[2, 4, 8, 16] , __UpperCAmelCase=7 , __UpperCAmelCase=3.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=0.0 , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ) ->List[Any]: super().__init__(**__UpperCAmelCase) a_ = patch_size a_ = num_channels a_ = embed_dim a_ = depths a_ = len(__UpperCAmelCase) a_ = num_heads a_ = kernel_size a_ = mlp_ratio a_ = qkv_bias a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = drop_path_rate a_ = hidden_act a_ = layer_norm_eps a_ = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model a_ = int(embed_dim * 2 ** (len(__UpperCAmelCase) - 1)) a_ = layer_scale_init_value a_ = ["stem"] + [F'''stage{idx}''' for idx in range(1 , len(__UpperCAmelCase) + 1)] a_ , a_ = get_aligned_output_features_output_indices( out_features=__UpperCAmelCase , out_indices=__UpperCAmelCase , stage_names=self.stage_names)
303
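# A minimal sketch instantiating the configuration above with the defaults it documents; the
# derived `hidden_size` is the channel count after the last stage.
from transformers import NatConfig

config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7)
print(config.hidden_size)  # 64 * 2 ** 3 == 512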
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger(__name__) def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" if "resnet-50" in model_name: a_ = ResNetConfig.from_pretrained("microsoft/resnet-50" ) elif "resnet-101" in model_name: a_ = ResNetConfig.from_pretrained("microsoft/resnet-101" ) else: raise ValueError("Model name should include either resnet50 or resnet101" ) a_ = DetrConfig(use_timm_backbone=UpperCAmelCase , backbone_config=UpperCAmelCase ) # set label attributes a_ = "panoptic" in model_name if is_panoptic: a_ = 250 else: a_ = 91 a_ = "huggingface/label-files" a_ = "coco-detection-id2label.json" a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) ) a_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()} a_ = idalabel a_ = {v: k for k, v in idalabel.items()} return config, is_panoptic def UpperCamelCase ( UpperCAmelCase ) ->List[str]: """simple docstring""" a_ = [] # stem # fmt: off rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") ) rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") ) rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") ) rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") ) rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") ) # stages for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): # shortcut if layer_idx == 0: rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''', ) ) # 3 convs for i in range(3 ): rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''', 
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''', ) ) # fmt: on for i in range(config.encoder_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''', ) ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') ) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''', ) ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append( 
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ] ) return rename_keys def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" a_ = state_dict.pop(UpperCAmelCase ) a_ = val def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False ) ->Optional[Any]: """simple docstring""" a_ = "" if is_panoptic: a_ = "detr." # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict a_ = in_proj_weight[:256, :] a_ = in_proj_bias[:256] a_ = in_proj_weight[256:512, :] a_ = in_proj_bias[256:512] a_ = in_proj_weight[-256:, :] a_ = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' ) a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict a_ = in_proj_weight[:256, :] a_ = in_proj_bias[:256] a_ = in_proj_weight[256:512, :] a_ = in_proj_bias[256:512] a_ = in_proj_weight[-256:, :] a_ = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention a_ = state_dict.pop( F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' ) a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) of cross-attention to the state dict a_ = in_proj_weight_cross_attn[:256, :] a_ = in_proj_bias_cross_attn[:256] a_ = in_proj_weight_cross_attn[256:512, :] a_ = in_proj_bias_cross_attn[256:512] a_ = in_proj_weight_cross_attn[-256:, :] a_ = in_proj_bias_cross_attn[-256:] def UpperCamelCase ( ) ->Dict: """simple docstring""" a_ = 
"http://images.cocodataset.org/val2017/000000039769.jpg" a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) return im @torch.no_grad() def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=False ) ->List[str]: """simple docstring""" a_ , a_ = get_detr_config(UpperCAmelCase ) # load original model from torch hub a_ = { "detr-resnet-50": "detr_resnet50", "detr-resnet-101": "detr_resnet101", } logger.info(F'''Converting model {model_name}...''' ) a_ = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=UpperCAmelCase ).eval() a_ = detr.state_dict() # rename keys for src, dest in create_rename_keys(UpperCAmelCase ): if is_panoptic: a_ = "detr." + src rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # query, key and value matrices need special treatment read_in_q_k_v(UpperCAmelCase , is_panoptic=UpperCAmelCase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them a_ = "detr.model." if is_panoptic else "model." for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): a_ = state_dict.pop(UpperCAmelCase ) a_ = val elif "class_labels_classifier" in key or "bbox_predictor" in key: a_ = state_dict.pop(UpperCAmelCase ) a_ = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: a_ = state_dict.pop(UpperCAmelCase ) a_ = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): a_ = state_dict.pop(UpperCAmelCase ) a_ = val # finally, create HuggingFace model and load state dict a_ = DetrForSegmentation(UpperCAmelCase ) if is_panoptic else DetrForObjectDetection(UpperCAmelCase ) model.load_state_dict(UpperCAmelCase ) model.eval() # verify our conversion on an image a_ = "coco_panoptic" if is_panoptic else "coco_detection" a_ = DetrImageProcessor(format=UpperCAmelCase ) a_ = processor(images=prepare_img() , return_tensors="pt" ) a_ = encoding["pixel_values"] a_ = detr(UpperCAmelCase ) a_ = model(UpperCAmelCase ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase ) model.save_pretrained(UpperCAmelCase ) processor.save_pretrained(UpperCAmelCase ) if push_to_hub: # Upload model and image processor to the hub logger.info("Uploading PyTorch model and image processor to the hub..." ) model.push_to_hub(F'''nielsr/{model_name}''' ) processor.push_to_hub(F'''nielsr/{model_name}''' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument( '--model_name', default='detr-resnet-50', type=str, choices=['detr-resnet-50', 'detr-resnet-101'], help='Name of the DETR model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' 
) parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.') UpperCamelCase_ = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
303
1
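# A minimal inference sketch for the converted detector. "facebook/detr-resnet-50" is the
# canonical hub checkpoint for this architecture (the script above pushes under
# nielsr/<model_name>); the image URL matches the one used for verification above.
import requests
import torch
from PIL import Image
from transformers import DetrForObjectDetection, DetrImageProcessor

processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
results = processor.post_process_object_detection(
    outputs, target_sizes=torch.tensor([image.size[::-1]]), threshold=0.9
)[0]
for label, score in zip(results["labels"], results["scores"]):
    print(model.config.id2label[label.item()], round(score.item(), 3))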
"""simple docstring""" import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor UpperCamelCase_ = logging.get_logger(__name__) class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->None: warnings.warn( "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use YolosImageProcessor instead." , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase)
303
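# The deprecation above has a drop-in replacement; a minimal sketch (the checkpoint name is
# a public YOLOS checkpoint, used here only for illustration):
from transformers import YolosImageProcessor

processor = YolosImageProcessor.from_pretrained("hustvl/yolos-tiny")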
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def UpperCamelCase ( UpperCAmelCase ) ->Tuple: """simple docstring""" a_ = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2] a_ = True if "large" in model_name or "huge" in model_name else False a_ = True if "large" in model_name or "huge" in model_name else False a_ = True if "large" in model_name or "huge" in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: a_ = [3, 3, 3, 3] a_ = [5, 5, 5, 5] elif "fl4" in model_name: a_ = [4, 4, 4, 4] a_ = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: a_ = [3, 3, 3, 3] if "lrf" in model_name: a_ = [3, 3, 3, 3] else: a_ = [2, 2, 2, 2] if "tiny" in model_name: a_ = 96 elif "small" in model_name: a_ = 96 elif "base" in model_name: a_ = 128 elif "large" in model_name: a_ = 192 elif "xlarge" in model_name: a_ = 256 elif "huge" in model_name: a_ = 352 # set label information a_ = "huggingface/label-files" if "large" in model_name or "huge" in model_name: a_ = "imagenet-22k-id2label.json" else: a_ = "imagenet-1k-id2label.json" a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) ) a_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()} a_ = {v: k for k, v in idalabel.items()} a_ = FocalNetConfig( embed_dim=UpperCAmelCase , depths=UpperCAmelCase , focal_levels=UpperCAmelCase , focal_windows=UpperCAmelCase , use_conv_embed=UpperCAmelCase , idalabel=UpperCAmelCase , labelaid=UpperCAmelCase , use_post_layernorm=UpperCAmelCase , use_layerscale=UpperCAmelCase , ) return config def UpperCamelCase ( UpperCAmelCase ) ->Any: """simple docstring""" if "patch_embed.proj" in name: a_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: a_ = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: a_ = "encoder." + name if "encoder.layers" in name: a_ = name.replace("encoder.layers" , "encoder.stages" ) if "downsample.proj" in name: a_ = name.replace("downsample.proj" , "downsample.projection" ) if "blocks" in name: a_ = name.replace("blocks" , "layers" ) if "modulation.f.weight" in name or "modulation.f.bias" in name: a_ = name.replace("modulation.f" , "modulation.projection_in" ) if "modulation.h.weight" in name or "modulation.h.bias" in name: a_ = name.replace("modulation.h" , "modulation.projection_context" ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: a_ = name.replace("modulation.proj" , "modulation.projection_out" ) if name == "norm.weight": a_ = "layernorm.weight" if name == "norm.bias": a_ = "layernorm.bias" if "head" in name: a_ = name.replace("head" , "classifier" ) else: a_ = "focalnet." 
+ name return name def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) ->Dict: """simple docstring""" a_ = { "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth", "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth", "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth", "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth", "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth", "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth", "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth", "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth", "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth", "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth", } # fmt: on a_ = model_name_to_url[model_name] print("Checkpoint URL: " , UpperCAmelCase ) a_ = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location="cpu" )["model"] # rename keys for key in state_dict.copy().keys(): a_ = state_dict.pop(UpperCAmelCase ) a_ = val a_ = get_focalnet_config(UpperCAmelCase ) a_ = FocalNetForImageClassification(UpperCAmelCase ) model.eval() # load state dict model.load_state_dict(UpperCAmelCase ) # verify conversion a_ = "http://images.cocodataset.org/val2017/000000039769.jpg" a_ = BitImageProcessor( do_resize=UpperCAmelCase , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase , crop_size=224 , do_normalize=UpperCAmelCase , image_mean=UpperCAmelCase , image_std=UpperCAmelCase , ) a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) a_ = processor(images=UpperCAmelCase , return_tensors="pt" ) a_ = transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) a_ = image_transforms(UpperCAmelCase ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , UpperCAmelCase , atol=1E-4 ) a_ = model(**UpperCAmelCase ) a_ = outputs.logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) print("First values of logits:" , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": a_ = torch.tensor([0.2166, -0.4368, 0.2191] ) elif model_name == "focalnet-tiny-lrf": a_ = torch.tensor([1.1669, 0.0125, -0.1695] ) elif model_name == "focalnet-small": a_ = torch.tensor([0.4917, -0.0430, 0.1341] ) elif model_name == "focalnet-small-lrf": a_ = torch.tensor([-0.2588, -0.5342, -0.2331] ) elif model_name == "focalnet-base": a_ = torch.tensor([-0.1655, -0.4090, -0.1730] ) elif model_name == "focalnet-base-lrf": a_ = torch.tensor([0.5306, -0.0483, -0.3928] ) assert torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1E-4 ) print("Looks ok!" 
) if pytorch_dump_folder_path is not None: print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCAmelCase ) processor.save_pretrained(UpperCAmelCase ) if push_to_hub: print(F'''Pushing model and processor of {model_name} to the hub...''' ) model.push_to_hub(F'''{model_name}''' ) processor.push_to_hub(F'''{model_name}''' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='focalnet-tiny', type=str, help='Name of the FocalNet model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub.', ) UpperCamelCase_ = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
303
1
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase_ = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class snake_case ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : Any = ReformerTokenizer a_ : Any = ReformerTokenizerFast a_ : Union[str, Any] = True a_ : int = False a_ : Dict = True def UpperCAmelCase__ ( self) ->List[str]: super().setUp() a_ = ReformerTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase) tokenizer.save_pretrained(self.tmpdirname) def UpperCAmelCase__ ( self) ->Union[str, Any]: a_ = "<s>" a_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase) , __UpperCAmelCase) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase) , __UpperCAmelCase) def UpperCAmelCase__ ( self) ->Tuple: a_ = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , "<unk>") self.assertEqual(vocab_keys[1] , "<s>") self.assertEqual(vocab_keys[-1] , "j") self.assertEqual(len(__UpperCAmelCase) , 10_00) def UpperCAmelCase__ ( self) ->List[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 10_00) def UpperCAmelCase__ ( self) ->int: if not self.test_rust_tokenizer: return a_ = self.get_tokenizer() a_ = self.get_rust_tokenizer() a_ = "I was born in 92000, and this is falsé." a_ = tokenizer.tokenize(__UpperCAmelCase) a_ = rust_tokenizer.tokenize(__UpperCAmelCase) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase) a_ = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase) a_ = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase) a_ = self.get_rust_tokenizer() a_ = tokenizer.encode(__UpperCAmelCase) a_ = rust_tokenizer.encode(__UpperCAmelCase) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase) def UpperCAmelCase__ ( self , __UpperCAmelCase=15) ->int: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''): a_ = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase) # Simple input a_ = "This is a simple input" a_ = ["This is a simple input 1", "This is a simple input 2"] a_ = ("This is a simple input", "This is a pair") a_ = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(__UpperCAmelCase , tokenizer_r.encode , __UpperCAmelCase , max_length=__UpperCAmelCase , padding="max_length") # Simple input self.assertRaises(__UpperCAmelCase , tokenizer_r.encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding="max_length") # Simple input self.assertRaises( __UpperCAmelCase , tokenizer_r.batch_encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding="max_length" , ) # Pair input self.assertRaises(__UpperCAmelCase , tokenizer_r.encode , __UpperCAmelCase , max_length=__UpperCAmelCase , padding="max_length") # Pair input self.assertRaises(__UpperCAmelCase , tokenizer_r.encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding="max_length") # Pair input self.assertRaises( __UpperCAmelCase , tokenizer_r.batch_encode_plus 
, __UpperCAmelCase , max_length=__UpperCAmelCase , padding="max_length" , ) def UpperCAmelCase__ ( self) ->List[str]: pass def UpperCAmelCase__ ( self) ->Optional[int]: a_ = ReformerTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase) a_ = tokenizer.tokenize("This is a test") self.assertListEqual(__UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [2_85, 46, 10, 1_70, 3_82] , ) a_ = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) a_ = tokenizer.convert_tokens_to_ids(__UpperCAmelCase) self.assertListEqual( __UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) a_ = tokenizer.convert_ids_to_tokens(__UpperCAmelCase) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def UpperCAmelCase__ ( self) ->Union[str, Any]: return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment") @slow def UpperCAmelCase__ ( self) ->str: a_ = "Hello World!" a_ = [1_26, 32, 2_62, 1_52, 38, 72, 2_87] self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase)) @slow def UpperCAmelCase__ ( self) ->Dict: a_ = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will" " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth" ) a_ = [ 1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 35, 28, 2_75, 3, 2_59, 2_97, 2_60, 84, 4, 35, 1_10, 44, 8, 2_59, 91, 2_68, 21, 11, 2_09, 2_74, 1_09, 2_66, 2_77, 1_17, 86, 93, 3_15, 2_58, 2_78, 2_58, 2_77, 2_58, 0, 2_58, 2_88, 2_58, 3_19, 2_58, 0, 2_58, 0, 2_58, 0, 2_58, 0, 2_58, 2_87, 2_58, 3_15, 2_58, 2_89, 2_58, 2_78, 99, 2_69, 2_66, 2_62, 8, 2_59, 2_41, 4, 2_17, 2_30, 2_68, 2_66, 55, 1_68, 1_06, 75, 1_93, 2_66, 2_23, 27, 49, 26, 2_82, 25, 2_64, 2_99, 19, 26, 0, 2_58, 2_77, 1_17, 86, 93, 1_76, 1_83, 2_70, 11, 2_62, 42, 61, 2_65, ] self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase)) @require_torch @slow def UpperCAmelCase__ ( self) ->Any: import torch from transformers import ReformerConfig, ReformerModel # Build sequence a_ = list(self.big_tokenizer.get_vocab().keys())[:10] a_ = " ".join(__UpperCAmelCase) a_ = self.big_tokenizer.encode_plus(__UpperCAmelCase , return_tensors="pt") a_ = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt") a_ = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) a_ = encoded_sequence["input_ids"].shape a_ = ReformerModel(__UpperCAmelCase) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**__UpperCAmelCase) model(**__UpperCAmelCase) @slow def UpperCAmelCase__ ( self) ->str: # fmt: off a_ = {"input_ids": [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 a_ = [ "This is a very simple sentence.", "The quick brown fox jumps over the lazy dog.", ] self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=__UpperCAmelCase , sequences=__UpperCAmelCase , )
303
"""simple docstring""" import os import numpy import onnx def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->List[str]: """simple docstring""" a_ = a.name a_ = b.name a_ = "" a_ = "" a_ = a == b a_ = name_a a_ = name_b return res def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]: """simple docstring""" for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCAmelCase , UpperCAmelCase ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase , UpperCAmelCase ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Dict: """simple docstring""" for n in graph_proto.node: _node_replace_input_with(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int: """simple docstring""" a_ = list(model.graph.initializer ) a_ = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i a_ = inits[i].name a_ = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase ) def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" a_ = os.path.dirname(UpperCAmelCase ) a_ = os.path.basename(UpperCAmelCase ) a_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) ) a_ = list(model.graph.initializer ) a_ = set() a_ = {} a_ = [] a_ = 0 for i in range(len(UpperCAmelCase ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCAmelCase ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCAmelCase ) dup_set.add(UpperCAmelCase ) a_ = inits[j].data_type a_ = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print("unexpected data type: " , UpperCAmelCase ) total_reduced_size += mem_size a_ = inits[i].name a_ = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCAmelCase ) else: a_ = [name_j] ind_to_replace.append((j, i) ) print("total reduced size: " , total_reduced_size / 1_024 / 1_024 / 1_024 , "GB" ) a_ = sorted(UpperCAmelCase ) _remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) a_ = "optimized_" + model_file_name a_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) onnx.save(UpperCAmelCase , UpperCAmelCase ) return new_model
303
1
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def UpperCamelCase ( UpperCAmelCase ) ->Optional[Any]: """simple docstring""" if ( (cp >= 0X4_e00 and cp <= 0X9_fff) or (cp >= 0X3_400 and cp <= 0X4_dbf) # or (cp >= 0X20_000 and cp <= 0X2a_6df) # or (cp >= 0X2a_700 and cp <= 0X2b_73f) # or (cp >= 0X2b_740 and cp <= 0X2b_81f) # or (cp >= 0X2b_820 and cp <= 0X2c_eaf) # or (cp >= 0Xf_900 and cp <= 0Xf_aff) or (cp >= 0X2f_800 and cp <= 0X2f_a1f) # ): # return True return False def UpperCamelCase ( UpperCAmelCase ) ->str: """simple docstring""" for char in word: a_ = ord(UpperCAmelCase ) if not _is_chinese_char(UpperCAmelCase ): return 0 return 1 def UpperCamelCase ( UpperCAmelCase ) ->str: """simple docstring""" a_ = set() for token in tokens: a_ = len(UpperCAmelCase ) > 1 and is_chinese(UpperCAmelCase ) if chinese_word: word_set.add(UpperCAmelCase ) a_ = list(UpperCAmelCase ) return word_list def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Dict: """simple docstring""" if not chinese_word_set: return bert_tokens a_ = max([len(UpperCAmelCase ) for w in chinese_word_set] ) a_ = bert_tokens a_ , a_ = 0, len(UpperCAmelCase ) while start < end: a_ = True if is_chinese(bert_word[start] ): a_ = min(end - start , UpperCAmelCase ) for i in range(UpperCAmelCase , 1 , -1 ): a_ = "".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): a_ = "##" + bert_word[j] a_ = start + i a_ = False break if single_word: start += 1 return bert_word def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" a_ = [] for i in range(0 , len(UpperCAmelCase ) , 100 ): a_ = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["cws"] ).cws a_ = [get_chinese_word(UpperCAmelCase ) for r in res] ltp_res.extend(UpperCAmelCase ) assert len(UpperCAmelCase ) == len(UpperCAmelCase ) a_ = [] for i in range(0 , len(UpperCAmelCase ) , 100 ): a_ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=UpperCAmelCase , truncation=UpperCAmelCase , max_length=512 ) bert_res.extend(res["input_ids"] ) assert len(UpperCAmelCase ) == len(UpperCAmelCase ) a_ = [] for input_ids, chinese_word in zip(UpperCAmelCase , UpperCAmelCase ): a_ = [] for id in input_ids: a_ = bert_tokenizer._convert_id_to_token(UpperCAmelCase ) input_tokens.append(UpperCAmelCase ) a_ = add_sub_symbol(UpperCAmelCase , UpperCAmelCase ) a_ = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(UpperCAmelCase ): if token[:2] == "##": a_ = token[2:] # save chinese tokens' pos if len(UpperCAmelCase ) == 1 and _is_chinese_char(ord(UpperCAmelCase ) ): ref_id.append(UpperCAmelCase ) ref_ids.append(UpperCAmelCase ) assert len(UpperCAmelCase ) == len(UpperCAmelCase ) return ref_ids def UpperCamelCase ( UpperCAmelCase ) ->Dict: """simple docstring""" with open(args.file_name , "r" , encoding="utf-8" ) as f: a_ = f.readlines() a_ = [line.strip() for line in data if len(UpperCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' a_ = LTP(args.ltp ) # faster in GPU device a_ = BertTokenizer.from_pretrained(args.bert ) a_ = prepare_ref(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) with open(args.save_path , "w" , encoding="utf-8" ) as f: a_ = [json.dumps(UpperCAmelCase ) + "\n" for ref in ref_ids] f.writelines(UpperCAmelCase ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', required=False, type=str, default='./resources/chinese-demo.txt', help='file need process, same as training data in lm', ) parser.add_argument( '--ltp', required=False, type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path', ) parser.add_argument( '--bert', required=False, type=str, default='./resources/robert', help='resources for Bert tokenizer', ) parser.add_argument( '--save_path', required=False, type=str, default='./resources/ref.txt', help='path to save res', ) UpperCamelCase_ = parser.parse_args() main(args)
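The heart of `add_sub_symbol` is a greedy longest-match scan: wherever consecutive single-character BERT tokens spell out a word that LTP segmented, every character after the first is prefixed with `##`, so whole-word masking later treats the span as one unit. A compact re-implementation of that idea:

```python
def mark_subwords(bert_tokens, chinese_words):
    """Prefix '##' to the continuation pieces of any span matching a segmented word."""
    out = list(bert_tokens)
    max_len = max((len(word) for word in chinese_words), default=0)
    i = 0
    while i < len(out):
        for span in range(min(max_len, len(out) - i), 1, -1):  # longest match first
            if "".join(out[i : i + span]) in chinese_words:
                for j in range(i + 1, i + span):
                    out[j] = "##" + out[j]
                i += span
                break
        else:
            i += 1
    return out


print(mark_subwords(list("我爱北京"), {"北京"}))  # ['我', '爱', '北', '##京']
```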
303
"""simple docstring""" import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , __UpperCAmelCase = "▁" , __UpperCAmelCase = True , __UpperCAmelCase = "<unk>" , __UpperCAmelCase = "</s>" , __UpperCAmelCase = "<pad>" , ) ->str: a_ = { "pad": {"id": 0, "token": pad_token}, "eos": {"id": 1, "token": eos_token}, "unk": {"id": 2, "token": unk_token}, } a_ = [None] * len(self.special_tokens) for token_dict in self.special_tokens.values(): a_ = token_dict["token"] a_ = Tokenizer(Unigram()) a_ = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}") , " "), normalizers.Lowercase(), ]) a_ = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase), pre_tokenizers.Digits(individual_digits=__UpperCAmelCase), pre_tokenizers.Punctuation(), ]) a_ = decoders.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase) a_ = TemplateProcessing( single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , ) a_ = { "model": "SentencePieceUnigram", "replacement": replacement, "add_prefix_space": add_prefix_space, } super().__init__(__UpperCAmelCase , __UpperCAmelCase) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = 80_00 , __UpperCAmelCase = True , ) ->Optional[Any]: a_ = trainers.UnigramTrainer( vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , ) if isinstance(__UpperCAmelCase , __UpperCAmelCase): a_ = [files] self._tokenizer.train(__UpperCAmelCase , trainer=__UpperCAmelCase) self.add_unk_id() def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = 80_00 , __UpperCAmelCase = True , ) ->int: a_ = trainers.UnigramTrainer( vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , ) self._tokenizer.train_from_iterator(__UpperCAmelCase , trainer=__UpperCAmelCase) self.add_unk_id() def UpperCAmelCase__ ( self) ->Union[str, Any]: a_ = json.loads(self._tokenizer.to_str()) a_ = self.special_tokens["unk"]["id"] a_ = Tokenizer.from_str(json.dumps(__UpperCAmelCase))
303
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCamelCase_ = { 'configuration_pix2struct': [ 'PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Pix2StructConfig', 'Pix2StructTextConfig', 'Pix2StructVisionConfig', ], 'processing_pix2struct': ['Pix2StructProcessor'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ['Pix2StructImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ 'PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Pix2StructPreTrainedModel', 'Pix2StructForConditionalGeneration', 'Pix2StructVisionModel', 'Pix2StructTextModel', ] if TYPE_CHECKING: from .configuration_pixastruct import ( PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, PixaStructConfig, PixaStructTextConfig, PixaStructVisionConfig, ) from .processing_pixastruct import PixaStructProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_pixastruct import PixaStructImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pixastruct import ( PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, PixaStructForConditionalGeneration, PixaStructPreTrainedModel, PixaStructTextModel, PixaStructVisionModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
303
"""simple docstring""" from urllib.parse import quote import pytest from datasets.utils.hub import hf_hub_url @pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] ) @pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] ) @pytest.mark.parametrize("revision" , [None, "v2"] ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int: """simple docstring""" a_ = hf_hub_url(repo_id=UpperCAmelCase , path=UpperCAmelCase , revision=UpperCAmelCase ) assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(UpperCAmelCase )}'''
303
1
"""simple docstring""" import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument UpperCamelCase_ = { '/attention/': '/0/SelfAttention/', '/self_attention/': '/0/SelfAttention/', '/encoder_decoder_attention/': '/1/EncDecAttention/', 'value': 'v', 'query': 'q', 'key': 'k', 'out': 'o', 'pre_self_attention_layer_norm': '0/layer_norm', 'pre_cross_attention_layer_norm': '1/layer_norm', 'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong 'token_embedder': 'shared', 'encoder_norm': 'final_layer_norm', 'decoder_norm': 'final_layer_norm', 'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight', 'router/router_weights/w/': 'router/classifier/', 'roer/roer_weights/w/': 'router/classifier/', 'logits_dense': 'lm_head', } def UpperCamelCase ( UpperCAmelCase ) ->Dict: """simple docstring""" a_ = list(s_dict.keys() ) for key in keys: a_ = r".*/layers_(\d+)" a_ = key if re.match(UpperCAmelCase , UpperCAmelCase ): a_ = re.sub(r"layers_(\d+)" , r"block/\1/layer" , UpperCAmelCase ) a_ = r"(encoder|decoder)\/" if re.match(UpperCAmelCase , UpperCAmelCase ): a_ = re.match(UpperCAmelCase , UpperCAmelCase ).groups() if groups[0] == "encoder": a_ = re.sub(r"/mlp/" , r"/1/mlp/" , UpperCAmelCase ) a_ = re.sub(r"/pre_mlp_layer_norm/" , r"/1/layer_norm/" , UpperCAmelCase ) elif groups[0] == "decoder": a_ = re.sub(r"/mlp/" , r"/2/mlp/" , UpperCAmelCase ) a_ = re.sub(r"/pre_mlp_layer_norm/" , r"/2/layer_norm/" , UpperCAmelCase ) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: a_ = new_key.replace(UpperCAmelCase , UpperCAmelCase ) print(F'''{key} -> {new_key}''' ) a_ = s_dict.pop(UpperCAmelCase ) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: a_ = s_dict[ "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: a_ = s_dict[ "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" ].T # 3. Take extra care of the EXPERTS layer for key in list(s_dict.keys() ): if "expert" in key: a_ = s_dict[key].shape[0] a_ = s_dict[key] for idx in range(UpperCAmelCase ): a_ = expert_weihts[idx] print(F'''{key} -> {key.replace("expert/" , "nested fstring" )}''' ) s_dict.pop(UpperCAmelCase ) return s_dict UpperCamelCase_ = { 'NUM_ENCODER_LAYERS': 'num_layers', 'NUM_DECODER_LAYERS': 'num_decoder_layers', 'NUM_HEADS': 'num_heads', 'HEAD_DIM': 'd_kv', 'EMBED_DIM': 'd_model', 'MLP_DIM': 'd_ff', 'NUM_SELECTED_EXPERTS': 'num_selected_experts', 'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers', 'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers', 'dense.MlpBlock.activations': 'feed_forward_proj', } def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->List[Any]: """simple docstring""" import regex as re with open(UpperCAmelCase , "r" ) as f: a_ = f.read() a_ = re.findall(r"(.*) = ([0-9.]*)" , UpperCAmelCase ) a_ = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": a_ = float(UpperCAmelCase ) if "." 
in value else int(UpperCAmelCase ) a_ = re.findall(r"(.*activations) = \(\'(.*)\',\)" , UpperCAmelCase )[0] a_ = str(activation[1] ) a_ = num_experts a_ = SwitchTransformersConfig(**UpperCAmelCase ) return config def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase="./" , UpperCAmelCase=8 ) ->int: """simple docstring""" print(F'''Loading flax weights from : {flax_checkpoint_path}''' ) a_ = checkpoints.load_tax_checkpoint(UpperCAmelCase ) if gin_file is not None: a_ = convert_gin_to_config(UpperCAmelCase , UpperCAmelCase ) else: a_ = SwitchTransformersConfig.from_pretrained(UpperCAmelCase ) a_ = SwitchTransformersForConditionalGeneration(UpperCAmelCase ) a_ = flax_params["target"] a_ = flatten_dict(UpperCAmelCase , sep="/" ) a_ = rename_keys(UpperCAmelCase ) a_ = unflatten_dict(UpperCAmelCase , sep="/" ) # Load the flax params in the PT model load_flax_weights_in_pytorch_model(UpperCAmelCase , UpperCAmelCase ) print(F'''Save PyTorch model to {pytorch_dump_path}''' ) pt_model.save_pretrained(UpperCAmelCase ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--switch_t5x_checkpoint_path', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the' ' model architecture. If not provided, a `gin_file` has to be provided.' ), ) parser.add_argument( '--gin_file', default=None, type=str, required=False, help='Path to the gin config file. If not provided, a `config_file` has to be passed ', ) parser.add_argument( '--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.' ) parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts') UpperCamelCase_ = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_tax_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
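`convert_gin_to_config` leans on two regular expressions: one that captures `NAME = value` pairs and one that pulls the activation out of a line like `dense.MlpBlock.activations = ('gelu',)`. Probing those exact patterns on a toy gin snippet shows why the converter skips empty values before casting:

```python
import re

gin_text = """NUM_ENCODER_LAYERS = 12
HEAD_DIM = 64
dense.MlpBlock.activations = ('gelu',)"""

# numeric values arrive as strings; the activations line matches with an empty value,
# which is why the converter filters value != "" before casting to int/float
params = dict(re.findall(r"(.*) = ([0-9.]*)", gin_text))
print(params)  # {'NUM_ENCODER_LAYERS': '12', 'HEAD_DIM': '64', 'dense.MlpBlock.activations': ''}

activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", gin_text)[0]
print(activation[1])  # gelu
```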
303
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { 'MIT/ast-finetuned-audioset-10-10-0.4593': ( 'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json' ), } class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : Tuple = """audio-spectrogram-transformer""" def __init__( self , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=10 , __UpperCAmelCase=10_24 , __UpperCAmelCase=1_28 , **__UpperCAmelCase , ) ->str: super().__init__(**__UpperCAmelCase) a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = initializer_range a_ = layer_norm_eps a_ = patch_size a_ = qkv_bias a_ = frequency_stride a_ = time_stride a_ = max_length a_ = num_mel_bins
303
1
"""simple docstring""" import warnings from functools import wraps from typing import Callable def UpperCamelCase ( UpperCAmelCase ) ->Callable: """simple docstring""" @wraps(UpperCAmelCase ) def _inner_fn(*UpperCAmelCase , **UpperCAmelCase ): warnings.warn( (F'''\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.''') , UpperCAmelCase , ) return fn(*UpperCAmelCase , **UpperCAmelCase ) return _inner_fn
303
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { 'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json', 'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json', 'xlm-roberta-large-finetuned-conll02-dutch': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json' ), 'xlm-roberta-large-finetuned-conll02-spanish': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json' ), 'xlm-roberta-large-finetuned-conll03-english': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json' ), 'xlm-roberta-large-finetuned-conll03-german': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json' ), } class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : str = """xlm-roberta""" def __init__( self , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) ->Union[str, Any]: super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase) a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = hidden_act a_ = intermediate_size a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = type_vocab_size a_ = initializer_range a_ = layer_norm_eps a_ = position_embedding_type a_ = use_cache a_ = classifier_dropout class snake_case ( SCREAMING_SNAKE_CASE_ ): @property def UpperCAmelCase__ ( self) ->Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": a_ = {0: "batch", 1: "choice", 2: "sequence"} else: a_ = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ])
303
1
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } UpperCamelCase_ = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def UpperCamelCase ( UpperCAmelCase ) ->Any: """simple docstring""" a_ = {} with open(UpperCAmelCase , "r" ) as file: for line_number, line in enumerate(UpperCAmelCase ): a_ = line.strip() if line: a_ = line.split() a_ = line_number a_ = words[0] a_ = value return result def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" for attribute in key.split("." ): a_ = getattr(UpperCAmelCase , UpperCAmelCase ) a_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(UpperCAmelCase ): a_ = PARAM_MAPPING[full_name.split("." )[-1]] a_ = "param" if weight_type is not None and weight_type != "param": a_ = getattr(UpperCAmelCase , UpperCAmelCase ).shape elif weight_type is not None and weight_type == "param": a_ = hf_pointer for attribute in hf_param_name.split("." ): a_ = getattr(UpperCAmelCase , UpperCAmelCase ) a_ = shape_pointer.shape # let's reduce dimension a_ = value[0] else: a_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": a_ = value elif weight_type == "weight_g": a_ = value elif weight_type == "weight_v": a_ = value elif weight_type == "bias": a_ = value elif weight_type == "param": for attribute in hf_param_name.split("." ): a_ = getattr(UpperCAmelCase , UpperCAmelCase ) a_ = value else: a_ = value logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int: """simple docstring""" a_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(UpperCAmelCase ): a_ = PARAM_MAPPING[full_name.split("." 
)[-1]] a_ = "param" if weight_type is not None and weight_type != "param": a_ = ".".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": a_ = ".".join([key, hf_param_name] ) else: a_ = key a_ = value if "lm_head" in full_key else value[0] UpperCamelCase_ = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None ) ->Optional[Any]: """simple docstring""" a_ = False for key, mapped_key in MAPPING.items(): a_ = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: a_ = True if "*" in mapped_key: a_ = name.split(UpperCAmelCase )[0].split("." )[-2] a_ = mapped_key.replace("*" , UpperCAmelCase ) if "weight_g" in name: a_ = "weight_g" elif "weight_v" in name: a_ = "weight_v" elif "bias" in name: a_ = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj a_ = "weight" else: a_ = None if hf_dict is not None: rename_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) else: set_recursively(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) return is_used return is_used def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->str: """simple docstring""" a_ = [] a_ = fairseq_model.state_dict() a_ = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): a_ = False if "conv_layers" in name: load_conv_layer( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , hf_model.config.feat_extract_norm == "group" , ) a_ = True else: a_ = load_wavaveca_layer(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) if not is_used: unused_weights.append(UpperCAmelCase ) logger.warning(F'''Unused weights: {unused_weights}''' ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int: """simple docstring""" a_ = full_name.split("conv_layers." )[-1] a_ = name.split("." 
) a_ = int(items[0] ) a_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) a_ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) a_ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) a_ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) a_ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(UpperCAmelCase ) @torch.no_grad() def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=False ) ->Tuple: """simple docstring""" if config_path is not None: a_ = WavaVecaConfig.from_pretrained(UpperCAmelCase ) else: a_ = WavaVecaConfig() if is_seq_class: a_ = read_txt_into_dict(UpperCAmelCase ) a_ = idalabel a_ = WavaVecaForSequenceClassification(UpperCAmelCase ) a_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=UpperCAmelCase , return_attention_mask=UpperCAmelCase , ) feature_extractor.save_pretrained(UpperCAmelCase ) elif is_finetuned: if dict_path: a_ = Dictionary.load(UpperCAmelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq a_ = target_dict.pad_index a_ = target_dict.bos_index a_ = target_dict.eos_index a_ = len(target_dict.symbols ) a_ = os.path.join(UpperCAmelCase , "vocab.json" ) if not os.path.isdir(UpperCAmelCase ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(UpperCAmelCase ) ) return os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase ) a_ = target_dict.indices # fairseq has the <pad> and <s> switched a_ = 0 a_ = 1 with open(UpperCAmelCase , "w" , encoding="utf-8" ) as vocab_handle: json.dump(UpperCAmelCase , UpperCAmelCase ) a_ = WavaVecaCTCTokenizer( UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=UpperCAmelCase , ) a_ = True if config.feat_extract_norm == "layer" else False a_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=UpperCAmelCase , return_attention_mask=UpperCAmelCase , ) a_ = WavaVecaProcessor(feature_extractor=UpperCAmelCase , 
tokenizer=UpperCAmelCase ) processor.save_pretrained(UpperCAmelCase ) a_ = WavaVecaForCTC(UpperCAmelCase ) else: a_ = WavaVecaForPreTraining(UpperCAmelCase ) if is_finetuned or is_seq_class: a_ , a_ , a_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: a_ = argparse.Namespace(task="audio_pretraining" ) a_ = fairseq.tasks.setup_task(UpperCAmelCase ) a_ , a_ , a_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=UpperCAmelCase ) a_ = model[0].eval() recursively_load_weights(UpperCAmelCase , UpperCAmelCase , not is_finetuned ) hf_wavavec.save_pretrained(UpperCAmelCase ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) UpperCamelCase_ = parser.parse_args() UpperCamelCase_ = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
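Both `set_recursively` and the shape check walk dotted key paths by calling `getattr` in a loop; `functools.reduce` expresses the same traversal in one line. A self-contained sketch:

```python
from functools import reduce


def get_by_path(obj, dotted_path):
    """Resolve 'a.b.c' style attribute paths, as the recursive loaders above do."""
    return reduce(getattr, dotted_path.split("."), obj)


class Leaf:
    weight = "w"


class Mid:
    leaf = Leaf()


class Root:
    mid = Mid()


print(get_by_path(Root(), "mid.leaf.weight"))  # 'w'
```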
303
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class snake_case : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=24 , __UpperCAmelCase=2 , __UpperCAmelCase=6 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=10_00 , ) ->List[str]: a_ = parent a_ = batch_size a_ = seq_length a_ = is_training a_ = use_input_mask a_ = use_token_type_ids a_ = use_labels a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = type_vocab_size a_ = type_sequence_label_size a_ = initializer_range a_ = num_labels a_ = scope a_ = range_bbox def UpperCAmelCase__ ( self) ->int: a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: a_ = bbox[i, j, 3] a_ = bbox[i, j, 1] a_ = t if bbox[i, j, 2] < bbox[i, j, 0]: a_ = bbox[i, j, 2] a_ = bbox[i, j, 0] a_ = t a_ = None if self.use_input_mask: a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) a_ = None if self.use_token_type_ids: a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) a_ = None a_ = None if self.use_labels: a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) a_ = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def UpperCAmelCase__ ( self) ->List[str]: return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Any: a_ = LiltModel(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase) a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , 
token_type_ids=__UpperCAmelCase) a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Union[str, Any]: a_ = self.num_labels a_ = LiltForTokenClassification(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model( __UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Dict: a_ = LiltForQuestionAnswering(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model( __UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def UpperCAmelCase__ ( self) ->str: a_ = self.prepare_config_and_inputs() ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) = config_and_inputs a_ = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : List[Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) a_ : List[str] = ( { """feature-extraction""": LiltModel, """question-answering""": LiltForQuestionAnswering, """text-classification""": LiltForSequenceClassification, """token-classification""": LiltForTokenClassification, """zero-shot""": LiltForSequenceClassification, } if is_torch_available() else {} ) a_ : Any = False a_ : Dict = False def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int: return True def UpperCAmelCase__ ( self) ->str: a_ = LiltModelTester(self) a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37) def UpperCAmelCase__ ( self) ->List[Any]: self.config_tester.run_common_tests() def UpperCAmelCase__ ( self) ->Tuple: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Dict: a_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a_ = type self.model_tester.create_and_check_model(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->List[str]: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->str: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase) @slow def UpperCAmelCase__ ( self) ->List[Any]: for model_name in 
LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ = LiltModel.from_pretrained(__UpperCAmelCase) self.assertIsNotNone(__UpperCAmelCase) @require_torch @slow class snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self) ->List[Any]: a_ = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(__UpperCAmelCase) a_ = torch.tensor([[1, 2]] , device=__UpperCAmelCase) a_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__UpperCAmelCase) # forward pass with torch.no_grad(): a_ = model(input_ids=__UpperCAmelCase , bbox=__UpperCAmelCase) a_ = torch.Size([1, 2, 7_68]) a_ = torch.tensor( [[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=__UpperCAmelCase , ) self.assertTrue(outputs.last_hidden_state.shape , __UpperCAmelCase) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __UpperCAmelCase , atol=1E-3))
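The tester's nested loop that swaps coordinates so every box satisfies x0 <= x1 and y0 <= y1 can be vectorized; a sketch of the same fix-up with `torch.minimum`/`torch.maximum` (assuming `torch` is installed):

```python
import torch


def make_boxes_legal(bbox):
    """Reorder (x0, y0, x1, y1) so that x0 <= x1 and y0 <= y1 for every box."""
    x0 = torch.minimum(bbox[..., 0], bbox[..., 2])
    x1 = torch.maximum(bbox[..., 0], bbox[..., 2])
    y0 = torch.minimum(bbox[..., 1], bbox[..., 3])
    y1 = torch.maximum(bbox[..., 1], bbox[..., 3])
    return torch.stack([x0, y0, x1, y1], dim=-1)


print(make_boxes_legal(torch.tensor([[9, 7, 2, 3]])))  # tensor([[2, 3, 9, 7]])
```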
303
1
"""simple docstring""" from datetime import datetime import requests from bsa import BeautifulSoup if __name__ == "__main__": UpperCamelCase_ = input('Enter image url: ').strip() print(F"""Downloading image from {url} ...""") UpperCamelCase_ = BeautifulSoup(requests.get(url).content, 'html.parser') # The image URL is in the content field of the first meta tag with property og:image UpperCamelCase_ = soup.find('meta', {'property': 'og:image'})['content'] UpperCamelCase_ = requests.get(image_url).content UpperCamelCase_ = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg""" with open(file_name, 'wb') as fp: fp.write(image_data) print(F"""Done. Image saved to disk as {file_name}.""")
303
"""simple docstring""" from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case ( SCREAMING_SNAKE_CASE_ ): def UpperCAmelCase__ ( self) ->Any: a_ = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(__UpperCAmelCase , "embed_dim")) self.parent.assertTrue(hasattr(__UpperCAmelCase , "num_heads")) class snake_case : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=64 , __UpperCAmelCase=3 , __UpperCAmelCase=[16, 48, 96] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=2 , ) ->Optional[int]: a_ = parent a_ = batch_size a_ = image_size a_ = patch_sizes a_ = patch_stride a_ = patch_padding a_ = is_training a_ = use_labels a_ = num_labels a_ = num_channels a_ = embed_dim a_ = num_heads a_ = stride_kv a_ = depth a_ = cls_token a_ = attention_drop_rate a_ = initializer_range a_ = layer_norm_eps def UpperCAmelCase__ ( self) ->Any: a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) a_ = None if self.use_labels: # create a random int32 tensor of given shape a_ = ids_tensor([self.batch_size] , self.num_labels) a_ = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self) ->Union[str, Any]: return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[Any]: a_ = TFCvtModel(config=__UpperCAmelCase) a_ = model(__UpperCAmelCase , training=__UpperCAmelCase) a_ = (self.image_size, self.image_size) a_ , a_ = image_size[0], image_size[1] for i in range(len(self.depth)): a_ = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1) a_ = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str: a_ = self.num_labels a_ = TFCvtForImageClassification(__UpperCAmelCase) a_ = model(__UpperCAmelCase , labels=__UpperCAmelCase , 
training=__UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def UpperCAmelCase__ ( self) ->Tuple: a_ = self.prepare_config_and_inputs() a_ , a_ , a_ = config_and_inputs a_ = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : Union[str, Any] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () a_ : List[Any] = ( {"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification} if is_tf_available() else {} ) a_ : Any = False a_ : Dict = False a_ : Optional[int] = False a_ : List[Any] = False a_ : List[Any] = False def UpperCAmelCase__ ( self) ->List[str]: a_ = TFCvtModelTester(self) a_ = TFCvtConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37) def UpperCAmelCase__ ( self) ->List[str]: self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason="Cvt does not output attentions") def UpperCAmelCase__ ( self) ->Dict: pass @unittest.skip(reason="Cvt does not use inputs_embeds") def UpperCAmelCase__ ( self) ->List[str]: pass @unittest.skip(reason="Cvt does not support input and output embeddings") def UpperCAmelCase__ ( self) ->Optional[Any]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) def UpperCAmelCase__ ( self) ->Dict: super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." 
, ) @slow def UpperCAmelCase__ ( self) ->List[str]: super().test_keras_fit() @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8") def UpperCAmelCase__ ( self) ->Dict: a_ = tf.keras.mixed_precision.Policy("mixed_float16") tf.keras.mixed_precision.set_global_policy(__UpperCAmelCase) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy("float32") def UpperCAmelCase__ ( self) ->Optional[int]: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = model_class(__UpperCAmelCase) a_ = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic a_ = [*signature.parameters.keys()] a_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCAmelCase) def UpperCAmelCase__ ( self) ->Optional[int]: def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase): a_ = model_class(__UpperCAmelCase) a_ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase)) a_ = outputs.hidden_states a_ = len(self.model_tester.depth) self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:]) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a_ = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) def UpperCAmelCase__ ( self) ->Dict: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->List[str]: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase) @slow def UpperCAmelCase__ ( self) ->str: for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ = TFCvtModel.from_pretrained(__UpperCAmelCase) self.assertIsNotNone(__UpperCAmelCase) def UpperCamelCase ( ) ->Dict: """simple docstring""" a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class snake_case ( unittest.TestCase ): @cached_property def UpperCAmelCase__ ( self) ->int: return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) @slow def UpperCAmelCase__ ( self) ->Any: a_ = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) a_ = self.default_image_processor a_ = prepare_img() a_ = image_processor(images=__UpperCAmelCase , return_tensors="tf") # forward pass a_ = model(**__UpperCAmelCase) # verify the logits a_ = tf.TensorShape((1, 10_00)) self.assertEqual(outputs.logits.shape , __UpperCAmelCase) a_ = tf.constant([0.9_285, 0.9_015, -0.3_150]) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __UpperCAmelCase , atol=1E-4))
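The spatial sizes the model tester asserts come straight from the strided-convolution formula floor((size + 2*pad - kernel)/stride + 1). Tracing the tester's three CvT stages on its 64x64 input:

```python
from math import floor


def conv_out(size, kernel, stride, padding):
    return floor((size + 2 * padding - kernel) / stride + 1)


size = 64  # image_size from the tester above
for kernel, stride, padding in zip([7, 3, 3], [4, 2, 2], [2, 1, 1]):
    size = conv_out(size, kernel, stride, padding)
    print(size)  # 16, then 8, then 4
```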
303
1
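The TFCvt hidden-state test above asserts that the first stage outputs feature maps of spatial size `image_size // 4`. Below is a minimal pure-Python sketch of that shape arithmetic, assuming CvT's usual stride-4 first-stage patch embedding followed by stride-2 embeddings in the later stages; the embed dims are illustrative values, not a real checkpoint's config:

```python
def cvt_stage_shapes(image_size: int, embed_dims: list[int]) -> list[tuple[int, int, int]]:
    """Per-stage (channels, height, width) under the assumed 4/2/2 stride schedule."""
    shapes = []
    resolution = image_size // 4  # stage 1 patch embedding uses stride 4
    for dim in embed_dims:
        shapes.append((dim, resolution, resolution))
        resolution //= 2  # assumed stride-2 embedding for each later stage
    return shapes


print(cvt_stage_shapes(64, [16, 32, 64]))  # [(16, 16, 16), (32, 8, 8), (64, 4, 4)]
```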
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { 'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json', } class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : List[str] = """mra""" def __init__( self , __UpperCAmelCase=5_02_65 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-5 , __UpperCAmelCase="absolute" , __UpperCAmelCase=4 , __UpperCAmelCase="full" , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , **__UpperCAmelCase , ) ->Union[str, Any]: super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase) a_ = vocab_size a_ = max_position_embeddings a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = initializer_range a_ = type_vocab_size a_ = layer_norm_eps a_ = position_embedding_type a_ = block_per_row a_ = approx_mode a_ = initial_prior_first_n_blocks a_ = initial_prior_diagonal_n_blocks
303
"""simple docstring""" import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : Dict = """Speech2TextFeatureExtractor""" a_ : str = """Speech2TextTokenizer""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase) ->List[str]: super().__init__(__UpperCAmelCase , __UpperCAmelCase) a_ = self.feature_extractor a_ = False def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->Optional[int]: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.") a_ = kwargs.pop("raw_speech") else: a_ = kwargs.pop("audio" , __UpperCAmelCase) a_ = kwargs.pop("sampling_rate" , __UpperCAmelCase) a_ = kwargs.pop("text" , __UpperCAmelCase) if len(__UpperCAmelCase) > 0: a_ = args[0] a_ = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process.") if audio is not None: a_ = self.feature_extractor(__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase) if text is not None: a_ = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase) if text is None: return inputs elif audio is None: return encodings else: a_ = encodings["input_ids"] return inputs def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->str: return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase) def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->int: return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase) @contextmanager def UpperCAmelCase__ ( self) ->Tuple: warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call.") a_ = True a_ = self.tokenizer yield a_ = self.feature_extractor a_ = False
303
1
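The Speech2Text processor above is essentially a dispatcher: one `__call__` that routes `audio` to the feature extractor and `text` to the tokenizer, attaching the tokenized ids as labels when both are supplied. Here is a framework-free sketch of that routing logic, with stand-in callables rather than the real transformers classes:

```python
class ToyProcessor:
    """Minimal audio/text dispatch in the style of Speech2TextProcessor."""

    def __init__(self, feature_extractor, tokenizer):
        self.feature_extractor = feature_extractor
        self.tokenizer = tokenizer

    def __call__(self, audio=None, text=None, **kwargs):
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        inputs = self.feature_extractor(audio, **kwargs) if audio is not None else None
        encodings = self.tokenizer(text, **kwargs) if text is not None else None
        if encodings is None:
            return inputs
        if inputs is None:
            return encodings
        inputs["labels"] = encodings["input_ids"]  # the text becomes the training target
        return inputs


proc = ToyProcessor(
    feature_extractor=lambda a, **kw: {"input_features": list(a)},
    tokenizer=lambda t, **kw: {"input_ids": [ord(c) for c in t]},
)
print(proc(audio=[0.1, 0.2], text="hi"))
```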
"""simple docstring""" from typing import Any def UpperCamelCase ( UpperCAmelCase ) ->list[Any]: """simple docstring""" if not input_list: return [] a_ = [input_list.count(UpperCAmelCase ) for value in input_list] a_ = max(UpperCAmelCase ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(UpperCAmelCase ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
303
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase_ = { 'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ 'LILT_PRETRAINED_MODEL_ARCHIVE_LIST', 'LiltForQuestionAnswering', 'LiltForSequenceClassification', 'LiltForTokenClassification', 'LiltModel', 'LiltPreTrainedModel', ] if TYPE_CHECKING: from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lilt import ( LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, LiltPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
303
1
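The quadratic `input_list.count` loop in `mode()` above can be cross-checked against an O(n) version built on `collections.Counter`; both return every value attaining the maximum frequency, sorted:

```python
from collections import Counter


def mode_via_counter(values: list) -> list:
    """Linear-time mode: all values that attain the maximum count, sorted."""
    if not values:
        return []
    counts = Counter(values)
    top = max(counts.values())
    return sorted(v for v, c in counts.items() if c == top)


print(mode_via_counter([2, 2, 3, 3, 1]))  # [2, 3] -- two modes
print(mode_via_counter([]))  # []
```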
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class snake_case : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=2 , __UpperCAmelCase=24 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.02 , __UpperCAmelCase=None , __UpperCAmelCase=2 , __UpperCAmelCase=2 , ) ->List[Any]: a_ = parent a_ = batch_size a_ = patch_size a_ = max_length a_ = num_mel_bins a_ = is_training a_ = use_labels a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = type_sequence_label_size a_ = initializer_range a_ = scope a_ = frequency_stride a_ = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) a_ = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 a_ = (self.max_length - self.patch_size) // self.time_stride + 1 a_ = frequency_out_dimension * time_out_dimension a_ = num_patches + 2 def UpperCAmelCase__ ( self) ->Dict: a_ = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins]) a_ = None if self.use_labels: a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) a_ = self.get_config() return config, input_values, labels def UpperCAmelCase__ ( self) ->Dict: return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Any: a_ = ASTModel(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase__ ( self) ->Optional[Any]: a_ = self.prepare_config_and_inputs() ( ( a_ ) , ( a_ ) , ( a_ ) , ) = config_and_inputs a_ = {"input_values": input_values} return config, inputs_dict @require_torch class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : Optional[Any] 
= ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) a_ : Optional[Any] = ( {"""audio-classification""": ASTForAudioClassification, """feature-extraction""": ASTModel} if is_torch_available() else {} ) a_ : int = False a_ : Any = False a_ : Optional[Any] = False a_ : Union[str, Any] = False def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str: if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def UpperCAmelCase__ ( self) ->Optional[Any]: a_ = ASTModelTester(self) a_ = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37) def UpperCAmelCase__ ( self) ->Union[str, Any]: self.config_tester.run_common_tests() @unittest.skip(reason="AST does not use inputs_embeds") def UpperCAmelCase__ ( self) ->Optional[Any]: pass def UpperCAmelCase__ ( self) ->Optional[int]: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = model_class(__UpperCAmelCase) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) a_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear)) def UpperCAmelCase__ ( self) ->Optional[Any]: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = model_class(__UpperCAmelCase) a_ = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic a_ = [*signature.parameters.keys()] a_ = ["input_values"] self.assertListEqual(arg_names[:1] , __UpperCAmelCase) def UpperCAmelCase__ ( self) ->Any: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase) @slow def UpperCAmelCase__ ( self) ->Dict: for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ = ASTModel.from_pretrained(__UpperCAmelCase) self.assertIsNotNone(__UpperCAmelCase) def UpperCamelCase ( ) ->int: """simple docstring""" a_ = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" ) a_ , a_ = torchaudio.load(UpperCAmelCase ) return audio, sampling_rate @require_torch @require_torchaudio class snake_case ( unittest.TestCase ): @cached_property def UpperCAmelCase__ ( self) ->List[str]: return ( ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593") if is_torchaudio_available() else None ) @slow def UpperCAmelCase__ ( self) ->Tuple: a_ = self.default_feature_extractor a_ = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(__UpperCAmelCase) a_ = self.default_feature_extractor a_ , a_ = prepare_audio() a_ = audio.squeeze().numpy() a_ = feature_extractor(__UpperCAmelCase , sampling_rate=__UpperCAmelCase , return_tensors="pt").to(__UpperCAmelCase) # forward pass with torch.no_grad(): a_ = model(**__UpperCAmelCase) # verify the logits a_ = torch.Size((1, 5_27)) self.assertEqual(outputs.logits.shape , __UpperCAmelCase) a_ = torch.tensor([-0.8_760, -7.0_042, -8.6_602]).to(__UpperCAmelCase) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4))
303
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase_ = { 'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'], 'feature_extraction_mctct': ['MCTCTFeatureExtractor'], 'processing_mctct': ['MCTCTProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ 'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST', 'MCTCTForCTC', 'MCTCTModel', 'MCTCTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
303
1
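The AST tester above derives its expected sequence length from how the spectrogram is cut into patches. A standalone sketch of that arithmetic, using the tester's own toy sizes (patch 2, strides 2) rather than a real checkpoint's values:

```python
def ast_seq_length(num_mel_bins: int, max_length: int, patch_size: int,
                   frequency_stride: int, time_stride: int) -> int:
    """Number of patches plus the [CLS] and distillation tokens."""
    frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1
    time_out = (max_length - patch_size) // time_stride + 1
    return frequency_out * time_out + 2


# tester values: 8 * 12 patches + 2 special tokens = 98
print(ast_seq_length(num_mel_bins=16, max_length=24, patch_size=2,
                     frequency_stride=2, time_stride=2))
```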
"""simple docstring""" import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Optional[int]: """simple docstring""" a_ = LxmertConfig.from_json_file(UpperCAmelCase ) print(F'''Building PyTorch model from configuration: {config}''' ) a_ = LxmertForPreTraining(UpperCAmelCase ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , UpperCAmelCase ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) UpperCamelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
303
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) UpperCamelCase_ = { 'configuration_swiftformer': [ 'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwiftFormerConfig', 'SwiftFormerOnnxConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ 'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'SwiftFormerForImageClassification', 'SwiftFormerModel', 'SwiftFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
303
1
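The `__init__` files in these rows all use transformers' `_LazyModule` to defer heavy torch imports until an attribute is actually touched. A minimal sketch of the same idea with PEP 562 module-level `__getattr__`, meant to live in a package `__init__.py`; the module and attribute names below are illustrative:

```python
import importlib

_import_structure = {"configuration_swiftformer": ["SwiftFormerConfig"]}
# invert the mapping: attribute name -> submodule that defines it
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module("." + _attr_to_module[name], __package__)
        return getattr(module, name)  # the import happens only on first access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```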
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) UpperCamelCase_ = { 'configuration_swiftformer': [ 'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwiftFormerConfig', 'SwiftFormerOnnxConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ 'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'SwiftFormerForImageClassification', 'SwiftFormerModel', 'SwiftFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
303
"""simple docstring""" # Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]: """simple docstring""" a_ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, nicht wahr?", } # BLUE scores as follows: # "pair": [fairseq, transformers] a_ = { "wmt16-en-de-dist-12-1": [28.3, 27.52], "wmt16-en-de-dist-6-1": [27.4, 27.11], "wmt16-en-de-12-1": [26.9, 25.75], } a_ = F'''{src_lang}-{tgt_lang}''' a_ = F''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt16 - allenai license: apache-2.0 datasets: - wmt16 metrics: - bleu --- # FSMT ## Model description This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}. For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369). All 3 models are available: * [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1) * [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1) * [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "allenai/{model_name}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias ## Training data Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369). ## Eval results Here are the BLEU scores: model | fairseq | transformers -------|---------|---------- {model_name} | {scores[model_name][0]} | {scores[model_name][1]} The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs. The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=5 mkdir -p $DATA_DIR sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` ## Data Sources - [training, etc.](http://www.statmt.org/wmt16/) - [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372) ### BibTeX entry and citation info ``` @misc{{kasai2020deep, title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}}, author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. 
Smith}}, year={{2020}}, eprint={{2006.10369}}, archivePrefix={{arXiv}}, primaryClass={{cs.CL}} }} ``` ''' model_card_dir.mkdir(parents=UpperCAmelCase , exist_ok=UpperCAmelCase ) a_ = os.path.join(UpperCAmelCase , "README.md" ) print(F'''Generating {path}''' ) with open(UpperCAmelCase , "w" , encoding="utf-8" ) as f: f.write(UpperCAmelCase ) # make sure we are under the root of the project UpperCamelCase_ = Path(__file__).resolve().parent.parent.parent UpperCamelCase_ = repo_dir / 'model_cards' for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: UpperCamelCase_ = model_cards_dir / 'allenai' / model_name write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
303
1
"""simple docstring""" from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) UpperCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCamelCase_ = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... 
).images\n\n >>> images[0].save("robot_cat.png")\n ```\n' def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=8 ) ->Union[str, Any]: """simple docstring""" a_ = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 a_ = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->str: super().__init__() self.register_modules( unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , movq=__UpperCAmelCase , ) a_ = 2 ** (len(self.movq.config.block_out_channels) - 1) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->List[Any]: if latents is None: a_ = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase , device=__UpperCAmelCase , dtype=__UpperCAmelCase) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''') a_ = latents.to(__UpperCAmelCase) a_ = latents * scheduler.init_noise_sigma return latents def UpperCAmelCase__ ( self , __UpperCAmelCase=0) ->Dict: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`") a_ = torch.device(F'''cuda:{gpu_id}''') a_ = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(__UpperCAmelCase , __UpperCAmelCase) def UpperCAmelCase__ ( self , __UpperCAmelCase=0) ->Any: if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0"): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") a_ = torch.device(F'''cuda:{gpu_id}''') if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=__UpperCAmelCase) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) a_ = None for cpu_offloaded_model in [self.unet, self.movq]: a_ , a_ = cpu_offload_with_hook(__UpperCAmelCase , __UpperCAmelCase , prev_module_hook=__UpperCAmelCase) # We'll offload the last model manually. 
a_ = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def UpperCAmelCase__ ( self) ->Union[str, Any]: if not hasattr(self.unet , "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(__UpperCAmelCase , "_hf_hook") and hasattr(module._hf_hook , "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device @torch.no_grad() @replace_example_docstring(__UpperCAmelCase) def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 5_12 , __UpperCAmelCase = 5_12 , __UpperCAmelCase = 1_00 , __UpperCAmelCase = 4.0 , __UpperCAmelCase = 1 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , ) ->Any: a_ = self._execution_device a_ = guidance_scale > 1.0 if isinstance(__UpperCAmelCase , __UpperCAmelCase): a_ = torch.cat(__UpperCAmelCase , dim=0) if isinstance(__UpperCAmelCase , __UpperCAmelCase): a_ = torch.cat(__UpperCAmelCase , dim=0) if isinstance(__UpperCAmelCase , __UpperCAmelCase): a_ = torch.cat(__UpperCAmelCase , dim=0) a_ = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: a_ = image_embeds.repeat_interleave(__UpperCAmelCase , dim=0) a_ = negative_image_embeds.repeat_interleave(__UpperCAmelCase , dim=0) a_ = hint.repeat_interleave(__UpperCAmelCase , dim=0) a_ = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=__UpperCAmelCase) a_ = torch.cat([hint, hint] , dim=0).to(dtype=self.unet.dtype , device=__UpperCAmelCase) self.scheduler.set_timesteps(__UpperCAmelCase , device=__UpperCAmelCase) a_ = self.scheduler.timesteps a_ = self.movq.config.latent_channels a_ , a_ = downscale_height_and_width(__UpperCAmelCase , __UpperCAmelCase , self.movq_scale_factor) # create initial latent a_ = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , self.scheduler , ) for i, t in enumerate(self.progress_bar(__UpperCAmelCase)): # expand the latents if we are doing classifier free guidance a_ = torch.cat([latents] * 2) if do_classifier_free_guidance else latents a_ = {"image_embeds": image_embeds, "hint": hint} a_ = self.unet( sample=__UpperCAmelCase , timestep=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , added_cond_kwargs=__UpperCAmelCase , return_dict=__UpperCAmelCase , )[0] if do_classifier_free_guidance: a_ , a_ = noise_pred.split(latents.shape[1] , dim=1) a_ , a_ = noise_pred.chunk(2) a_ , a_ = variance_pred.chunk(2) a_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) a_ = torch.cat([noise_pred, variance_pred_text] , dim=1) if not ( hasattr(self.scheduler.config , "variance_type") and self.scheduler.config.variance_type in ["learned", "learned_range"] ): a_ , a_ = noise_pred.split(latents.shape[1] , dim=1) # compute the previous noisy sample x_t -> x_t-1 a_ = self.scheduler.step( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase , )[0] # post-processing a_ = self.movq.decode(__UpperCAmelCase , force_not_quantize=__UpperCAmelCase)["sample"] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''') if output_type in ["np", "pil"]: a_ = image * 0.5 + 0.5 a_ = image.clamp(0 , 1) a_ = 
image.cpu().permute(0 , 2 , 3 , 1).float().numpy() if output_type == "pil": a_ = self.numpy_to_pil(__UpperCAmelCase) if not return_dict: return (image,) return ImagePipelineOutput(images=__UpperCAmelCase)
303
"""simple docstring""" def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int: """simple docstring""" if n == 0: return 1 elif n % 2 == 1: return (binary_exponentiation(UpperCAmelCase , n - 1 , UpperCAmelCase ) * a) % mod else: a_ = binary_exponentiation(UpperCAmelCase , n / 2 , UpperCAmelCase ) return (b * b) % mod # a prime number UpperCamelCase_ = 701 UpperCamelCase_ = 1000000000 UpperCamelCase_ = 10 # using binary exponentiation function, O(log(p)): print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p) print((a / b) % p == (a * b ** (p - 2)) % p)
303
1
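Because `p = 701` is prime, Fermat's little theorem makes `b ** (p - 2) % p` the modular inverse of `b`, which is exactly what the two `print()` checks in the file above rely on. Python's built-in `pow()` performs the same binary exponentiation natively:

```python
p = 701
b = 10

inv = pow(b, p - 2, p)  # modular inverse of b mod p, O(log p)
print((b * inv) % p)  # 1
print(pow(b, -1, p) == inv)  # True; pow supports -1 exponents since Python 3.8
```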
"""simple docstring""" import operator def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase = False , UpperCAmelCase = None ) ->list: """simple docstring""" a_ = operator.lt if reverse else operator.gt a_ = solution or [] if not arr: return solution a_ = [arr.pop(0 )] for i, item in enumerate(UpperCAmelCase ): if _operator(UpperCAmelCase , sublist[-1] ): sublist.append(UpperCAmelCase ) arr.pop(UpperCAmelCase ) # merging sublist into solution list if not solution: solution.extend(UpperCAmelCase ) else: while sublist: a_ = sublist.pop(0 ) for i, xx in enumerate(UpperCAmelCase ): if not _operator(UpperCAmelCase , UpperCAmelCase ): solution.insert(UpperCAmelCase , UpperCAmelCase ) break else: solution.append(UpperCAmelCase ) strand_sort(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
303
"""simple docstring""" import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor UpperCamelCase_ = logging.get_logger(__name__) class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->None: warnings.warn( "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use YolosImageProcessor instead." , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase)
303
1
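The `YolosFeatureExtractor` shim above follows a standard deprecation pattern: keep the old name importable, but emit a `FutureWarning` and delegate everything to the new class. A self-contained sketch of the pattern with hypothetical class names:

```python
import warnings


class NewProcessor:
    def __init__(self, size: int = 224):
        self.size = size


class OldProcessor(NewProcessor):
    """Deprecated alias kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    OldProcessor(size=128)
    print(caught[0].category.__name__)  # FutureWarning
```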
"""simple docstring""" import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class snake_case : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=16 , __UpperCAmelCase=[1, 2, 1] , __UpperCAmelCase=[2, 2, 4] , __UpperCAmelCase=2 , __UpperCAmelCase=2.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=8 , __UpperCAmelCase=["stage1", "stage2", "stage3"] , __UpperCAmelCase=[1, 2, 3] , ) ->Optional[Any]: a_ = parent a_ = batch_size a_ = image_size a_ = patch_size a_ = num_channels a_ = embed_dim a_ = depths a_ = num_heads a_ = window_size a_ = mlp_ratio a_ = qkv_bias a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = drop_path_rate a_ = hidden_act a_ = use_absolute_embeddings a_ = patch_norm a_ = layer_norm_eps a_ = initializer_range a_ = is_training a_ = scope a_ = use_labels a_ = type_sequence_label_size a_ = encoder_stride a_ = out_features a_ = out_indices def UpperCAmelCase__ ( self) ->int: a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) a_ = None if self.use_labels: a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) a_ = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self) ->Union[str, Any]: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Tuple: a_ = MaskFormerSwinModel(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase) a_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) a_ = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[int]: a_ = 
MaskFormerSwinBackbone(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [13, 16, 16, 16]) # verify channels self.parent.assertEqual(len(model.channels) , len(config.out_features)) self.parent.assertListEqual(model.channels , [16, 32, 64]) # verify ValueError with self.parent.assertRaises(__UpperCAmelCase): a_ = ["stem"] a_ = MaskFormerSwinBackbone(config=__UpperCAmelCase) def UpperCAmelCase__ ( self) ->List[Any]: a_ = self.prepare_config_and_inputs() a_ , a_ , a_ = config_and_inputs a_ = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : Optional[int] = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) a_ : Any = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {} a_ : Dict = False a_ : List[Any] = False a_ : Union[str, Any] = False a_ : Dict = False a_ : int = False def UpperCAmelCase__ ( self) ->Optional[int]: a_ = MaskFormerSwinModelTester(self) a_ = ConfigTester(self , config_class=__UpperCAmelCase , embed_dim=37) @require_torch_multi_gpu @unittest.skip( reason=( "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with" " `nn.DataParallel`" )) def UpperCAmelCase__ ( self) ->List[Any]: pass def UpperCAmelCase__ ( self) ->Optional[int]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase__ ( self) ->str: return def UpperCAmelCase__ ( self) ->Optional[Any]: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Any: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__UpperCAmelCase) @unittest.skip("Swin does not use inputs_embeds") def UpperCAmelCase__ ( self) ->str: pass @unittest.skip("Swin does not support feedforward chunking") def UpperCAmelCase__ ( self) ->Union[str, Any]: pass def UpperCAmelCase__ ( self) ->Dict: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = model_class(__UpperCAmelCase) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) a_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear)) def UpperCAmelCase__ ( self) ->List[Any]: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = model_class(__UpperCAmelCase) a_ = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic a_ = [*signature.parameters.keys()] a_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCAmelCase) @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions") def UpperCAmelCase__ ( self) ->int: pass @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone") def UpperCAmelCase__ 
( self) ->Dict: pass def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str: a_ = model_class(__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() with torch.no_grad(): a_ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase)) a_ = outputs.hidden_states a_ = getattr( self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths) + 1) self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase) # Swin has a different seq_length a_ = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) a_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , ) def UpperCAmelCase__ ( self) ->Union[str, Any]: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() a_ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: a_ = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a_ = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) def UpperCAmelCase__ ( self) ->List[str]: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() a_ = 3 a_ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) a_ = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) a_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) a_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: a_ = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , (padded_height, padded_width)) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a_ = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , (padded_height, padded_width)) @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints") def UpperCAmelCase__ ( self) ->List[str]: pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin") def UpperCAmelCase__ ( self) ->Dict: pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin") def UpperCAmelCase__ ( self) ->int: pass def UpperCAmelCase__ ( self) ->Dict: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(__UpperCAmelCase): a_ = 0 return t def check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase={}): with torch.no_grad(): a_ = model(**__UpperCAmelCase , return_dict=__UpperCAmelCase , **__UpperCAmelCase) a_ = model(**__UpperCAmelCase , return_dict=__UpperCAmelCase , **__UpperCAmelCase).to_tuple() def recursive_check(__UpperCAmelCase , __UpperCAmelCase): if isinstance(__UpperCAmelCase , (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(__UpperCAmelCase , __UpperCAmelCase): recursive_check(__UpperCAmelCase , 
__UpperCAmelCase) elif isinstance(__UpperCAmelCase , __UpperCAmelCase): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values()): recursive_check(__UpperCAmelCase , __UpperCAmelCase) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(__UpperCAmelCase) , set_nan_tensor_to_zero(__UpperCAmelCase) , atol=1E-5) , msg=( "Tuple and dict output are not equal. Difference:" F''' {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:''' F''' {torch.isnan(__UpperCAmelCase).any()} and `inf`: {torch.isinf(__UpperCAmelCase)}. Dict has''' F''' `nan`: {torch.isnan(__UpperCAmelCase).any()} and `inf`: {torch.isinf(__UpperCAmelCase)}.''' ) , ) recursive_check(__UpperCAmelCase , __UpperCAmelCase) for model_class in self.all_model_classes: a_ = model_class(__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase) a_ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase) check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) a_ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase) a_ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase) check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) a_ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase) a_ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase) check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , {"output_hidden_states": True}) a_ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase) a_ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase) check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , {"output_hidden_states": True}) @require_torch class snake_case ( unittest.TestCase , SCREAMING_SNAKE_CASE_ ): a_ : int = (MaskFormerSwinBackbone,) if is_torch_available() else () a_ : List[str] = MaskFormerSwinConfig def UpperCAmelCase__ ( self) ->Optional[int]: a_ = MaskFormerSwinModelTester(self) def UpperCAmelCase__ ( self) ->Union[str, Any]: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() a_ = inputs_dict["pixel_values"].shape[0] for backbone_class in self.all_model_classes: a_ = backbone_class(__UpperCAmelCase) backbone.to(__UpperCAmelCase) backbone.eval() a_ = backbone(**__UpperCAmelCase) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , __UpperCAmelCase) self.assertTrue(len(outputs.feature_maps) == len(backbone.channels)) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels)) self.assertIsNone(outputs.hidden_states) self.assertIsNone(outputs.attentions) # Test output_hidden_states=True a_ = backbone(**__UpperCAmelCase , output_hidden_states=__UpperCAmelCase) self.assertIsNotNone(outputs.hidden_states) self.assertTrue(len(outputs.hidden_states) , len(backbone.stage_names)) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) a_ , a_ , a_ = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels)) # Test output_attentions=True if self.has_attentions: a_ = 
backbone(**__UpperCAmelCase , output_attentions=__UpperCAmelCase) self.assertIsNotNone(outputs.attentions)
303
"""simple docstring""" import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self) ->Dict: a_ = inspect.getfile(accelerate.test_utils) a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"]) a_ = os.path.sep.join( mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]) a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"]) @require_multi_gpu def UpperCAmelCase__ ( self) ->Any: print(F'''Found {torch.cuda.device_count()} devices.''') a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path] with patch_environment(omp_num_threads=1): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy()) @require_multi_gpu def UpperCAmelCase__ ( self) ->str: print(F'''Found {torch.cuda.device_count()} devices.''') a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path] print(F'''Command: {cmd}''') with patch_environment(omp_num_threads=1): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy()) @require_multi_gpu def UpperCAmelCase__ ( self) ->Optional[int]: a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)] with patch_environment(omp_num_threads=1): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy()) @require_multi_gpu def UpperCAmelCase__ ( self) ->List[Any]: print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''') a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1"): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy()) if __name__ == "__main__": UpperCamelCase_ = Accelerator() UpperCamelCase_ = (accelerator.state.process_index + 2, 10) UpperCamelCase_ = torch.randint(0, 10, shape).to(accelerator.device) UpperCamelCase_ = '' UpperCamelCase_ = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." UpperCamelCase_ = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." UpperCamelCase_ = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
303
1
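The `__main__` block in the accelerate test above exercises `accelerator.pad_across_processes`, which zero-pads every rank's tensor along dim 0 up to the largest size, optionally padding at the front. Here is a single-process torch sketch of that padding, not the real accelerate implementation:

```python
import torch


def pad_to(tensor: torch.Tensor, target_len: int, pad_first: bool = False) -> torch.Tensor:
    """Zero-pad ``tensor`` along dim 0 up to ``target_len``."""
    pad_len = target_len - tensor.shape[0]
    padding = torch.zeros((pad_len, *tensor.shape[1:]), dtype=tensor.dtype)
    return torch.cat([padding, tensor] if pad_first else [tensor, padding], dim=0)


t = torch.randint(0, 10, (3, 10))
padded = pad_to(t, 5)
assert torch.equal(padded[:3], t) and torch.all(padded[3:] == 0)
padded_first = pad_to(t, 5, pad_first=True)
assert torch.equal(padded_first[2:], t) and torch.all(padded_first[:2] == 0)
print(padded.shape)  # torch.Size([5, 10])
```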
"""simple docstring""" from math import isqrt def UpperCamelCase ( UpperCAmelCase ) ->list[int]: """simple docstring""" a_ = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , UpperCAmelCase , UpperCAmelCase ): a_ = False return [i for i in range(2 , UpperCAmelCase ) if is_prime[i]] def UpperCamelCase ( UpperCAmelCase = 10**8 ) ->int: """simple docstring""" a_ = calculate_prime_numbers(max_number // 2 ) a_ = 0 a_ = 0 a_ = len(UpperCAmelCase ) - 1 while left <= right: while prime_numbers[left] * prime_numbers[right] >= max_number: right -= 1 semiprimes_count += right - left + 1 left += 1 return semiprimes_count if __name__ == "__main__": print(F"""{solution() = }""")
303
"""simple docstring""" from heapq import heappop, heappush import numpy as np def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) ->tuple[float | int, list[tuple[int, int]]]: """simple docstring""" a_ , a_ = grid.shape a_ = [-1, 1, 0, 0] a_ = [0, 0, -1, 1] if allow_diagonal: dx += [-1, -1, 1, 1] dy += [-1, 1, -1, 1] a_ , a_ = [(0, source)], set() a_ = np.full((rows, cols) , np.inf ) a_ = 0 a_ = np.empty((rows, cols) , dtype=UpperCAmelCase ) a_ = None while queue: ((a_) , (a_)) = heappop(UpperCAmelCase ) if (x, y) in visited: continue visited.add((x, y) ) if (x, y) == destination: a_ = [] while (x, y) != source: path.append((x, y) ) a_ , a_ = predecessors[x, y] path.append(UpperCAmelCase ) # add the source manually path.reverse() return matrix[destination], path for i in range(len(UpperCAmelCase ) ): a_ , a_ = x + dx[i], y + dy[i] if 0 <= nx < rows and 0 <= ny < cols: a_ = grid[nx][ny] if next_node == 1 and matrix[nx, ny] > dist + 1: heappush(UpperCAmelCase , (dist + 1, (nx, ny)) ) a_ = dist + 1 a_ = (x, y) return np.inf, [] if __name__ == "__main__": import doctest doctest.testmod()
303
1
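A tiny usage sketch for the grid search above, assuming the function is named `dijkstra` as in the reconstruction; `1` marks walkable cells and `0` marks walls, so the only route here goes through the middle cell:

```python
import numpy as np

grid = np.array(
    [
        [1, 1, 1],
        [0, 1, 0],
        [1, 1, 1],
    ]
)

dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
print(dist)  # 4.0
print(path)  # [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]
```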
"""simple docstring""" from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class snake_case : def __init__( self , __UpperCAmelCase , ) ->Union[str, Any]: a_ = parent a_ = 13 a_ = 7 a_ = True a_ = True a_ = True a_ = 99 a_ = 32 a_ = 2 a_ = 4 a_ = 37 a_ = "gelu" a_ = 0.1 a_ = 0.1 a_ = 5_12 a_ = 16 a_ = 2 a_ = 0.02 a_ = 3 a_ = 4 a_ = None def UpperCAmelCase__ ( self) ->List[str]: a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a_ = None if self.use_input_mask: a_ = random_attention_mask([self.batch_size, self.seq_length]) a_ = None a_ = None a_ = None if self.use_labels: a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) a_ = ids_tensor([self.batch_size] , self.num_choices) a_ = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self) ->str: ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) = self.prepare_config_and_inputs() a_ = True a_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->List[Any]: a_ = TFEsmModel(config=__UpperCAmelCase) a_ = {"input_ids": input_ids, "attention_mask": input_mask} a_ = model(__UpperCAmelCase) a_ = [input_ids, input_mask] a_ = model(__UpperCAmelCase) a_ = model(__UpperCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Tuple: a_ = True a_ = TFEsmModel(config=__UpperCAmelCase) a_ = { "input_ids": input_ids, "attention_mask": input_mask, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, } a_ = model(__UpperCAmelCase) a_ = [input_ids, input_mask] a_ = model(__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase) # Also check the case where encoder outputs are not passed a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape 
, (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str: a_ = TFEsmForMaskedLM(config=__UpperCAmelCase) a_ = model([input_ids, input_mask]) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Union[str, Any]: a_ = self.num_labels a_ = TFEsmForTokenClassification(config=__UpperCAmelCase) a_ = {"input_ids": input_ids, "attention_mask": input_mask} a_ = model(__UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def UpperCAmelCase__ ( self) ->List[Any]: a_ = self.prepare_config_and_inputs() ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) = config_and_inputs a_ = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : int = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) a_ : Union[str, Any] = ( { """feature-extraction""": TFEsmModel, """fill-mask""": TFEsmForMaskedLM, """text-classification""": TFEsmForSequenceClassification, """token-classification""": TFEsmForTokenClassification, """zero-shot""": TFEsmForSequenceClassification, } if is_tf_available() else {} ) a_ : Dict = False a_ : Union[str, Any] = False def UpperCAmelCase__ ( self) ->Any: a_ = TFEsmModelTester(self) a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37) def UpperCAmelCase__ ( self) ->str: self.config_tester.run_common_tests() def UpperCAmelCase__ ( self) ->Dict: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Tuple: a_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->str: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->str: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase) @slow def UpperCAmelCase__ ( self) ->Union[str, Any]: for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ = TFEsmModel.from_pretrained(__UpperCAmelCase) self.assertIsNotNone(__UpperCAmelCase) @unittest.skip("Protein models do not support embedding resizing.") def UpperCAmelCase__ ( self) ->Any: pass @unittest.skip("Protein models do not support embedding resizing.") def UpperCAmelCase__ ( self) ->Union[str, Any]: pass def UpperCAmelCase__ ( self) ->Any: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = model_class(__UpperCAmelCase) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer a_ = model.get_bias() assert isinstance(__UpperCAmelCase , __UpperCAmelCase) for k, v in name.items(): assert isinstance(__UpperCAmelCase , tf.Variable) else: a_ = model.get_output_embeddings() assert x is None a_ = model.get_bias() assert name 
is None @require_tf class snake_case ( unittest.TestCase ): @slow def UpperCAmelCase__ ( self) ->str: a_ = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D") a_ = tf.constant([[0, 1, 2, 3, 4, 5]]) a_ = model(__UpperCAmelCase)[0] a_ = [1, 6, 33] self.assertEqual(list(output.numpy().shape) , __UpperCAmelCase) # compare the actual values for a slice. a_ = tf.constant( [ [ [8.921_518, -10.589_814, -6.4_671_307], [-6.3_967_156, -13.911_377, -1.1_211_915], [-7.781_247, -13.951_557, -3.740_592], ] ]) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2)) @slow def UpperCAmelCase__ ( self) ->List[str]: a_ = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D") a_ = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]]) a_ = model(__UpperCAmelCase)[0] # compare the actual values for a slice. a_ = tf.constant( [ [ [0.14_443_092, 0.54_125_327, 0.3_247_739], [0.30_340_484, 0.00_526_676, 0.31_077_722], [0.32_278_043, -0.24_987_096, 0.3_414_628], ] ]) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4))
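# Added usage sketch (not part of the test file above): loading the same public
# ESM-2 checkpoint the integration tests exercise, outside the test suite.
# The protein sequence is a made-up example.
#
#     from transformers import AutoTokenizer, TFEsmForMaskedLM
#
#     tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     inputs = tokenizer("MKTAYIAKQR", return_tensors="tf")
#     logits = model(**inputs).logits  # (batch, seq_len, vocab_size=33), matching the test above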
303
"""simple docstring""" import numpy as np import torch from imwatermark import WatermarkEncoder # Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66 UpperCamelCase_ = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10 # bin(x)[2:] gives bits of x as str, use int to convert them to 0/1 UpperCamelCase_ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]] class snake_case : def __init__( self) ->Optional[int]: a_ = WATERMARK_BITS a_ = WatermarkEncoder() self.encoder.set_watermark("bits" , self.watermark) def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[int]: # can't encode images that are smaller than 256 if images.shape[-1] < 2_56: return images a_ = (2_55 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1).float().numpy() a_ = [self.encoder.encode(__UpperCAmelCase , "dwtDct") for image in images] a_ = torch.from_numpy(np.array(__UpperCAmelCase)).permute(0 , 3 , 1 , 2) a_ = torch.clamp(2 * (images / 2_55 - 0.5) , min=-1.0 , max=1.0) return images
303
1
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCamelCase_ = 16 UpperCamelCase_ = 32 def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase = 16 ) ->Optional[Any]: """simple docstring""" a_ = AutoTokenizer.from_pretrained("bert-base-cased" ) a_ = load_dataset("glue" , "mrpc" ) def tokenize_function(UpperCAmelCase ): # max_length=None => use the model max length (it's actually the default) a_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=UpperCAmelCase , max_length=UpperCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): a_ = datasets.map( UpperCAmelCase , batched=UpperCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library a_ = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(UpperCAmelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. a_ = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": a_ = 16 elif accelerator.mixed_precision != "no": a_ = 8 else: a_ = None return tokenizer.pad( UpperCAmelCase , padding="longest" , max_length=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_tensors="pt" , ) # Instantiate dataloaders. 
a_ = DataLoader( tokenized_datasets["train"] , shuffle=UpperCAmelCase , collate_fn=UpperCAmelCase , batch_size=UpperCAmelCase ) a_ = DataLoader( tokenized_datasets["validation"] , shuffle=UpperCAmelCase , collate_fn=UpperCAmelCase , batch_size=UpperCAmelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCamelCase_ = mocked_dataloaders # noqa: F811 def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->List[str]: """simple docstring""" if os.environ.get("TESTING_MOCKED_DATALOADERS" , UpperCAmelCase ) == "1": a_ = 2 # Initialize accelerator a_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs a_ = config["lr"] a_ = int(config["num_epochs"] ) a_ = int(config["seed"] ) a_ = int(config["batch_size"] ) a_ = evaluate.load("glue" , "mrpc" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=UpperCAmelCase ) def inner_training_loop(UpperCAmelCase ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(UpperCAmelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) a_ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=UpperCAmelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). a_ = model.to(accelerator.device ) # Instantiate optimizer a_ = AdamW(params=model.parameters() , lr=UpperCAmelCase ) a_ , a_ = get_dataloaders(UpperCAmelCase , UpperCAmelCase ) # Instantiate scheduler a_ = get_linear_schedule_with_warmup( optimizer=UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(UpperCAmelCase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. a_ , a_ , a_ , a_ , a_ = accelerator.prepare( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # Now we train the model for epoch in range(UpperCAmelCase ): model.train() for step, batch in enumerate(UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) a_ = model(**UpperCAmelCase ) a_ = outputs.loss accelerator.backward(UpperCAmelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): a_ = model(**UpperCAmelCase ) a_ = outputs.logits.argmax(dim=-1 ) a_ , a_ = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=UpperCAmelCase , references=UpperCAmelCase , ) a_ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , UpperCAmelCase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def UpperCamelCase ( ) ->int: """simple docstring""" a_ = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=UpperCAmelCase , default=UpperCAmelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) a_ = parser.parse_args() a_ = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(UpperCAmelCase , UpperCAmelCase ) if __name__ == "__main__": main()
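# Added note: the out-of-memory pattern above reduces to a minimal sketch. This
# is illustrative, not part of the original script; the training body is a
# placeholder.
#
#     from accelerate import Accelerator
#     from accelerate.utils import find_executable_batch_size
#
#     accelerator = Accelerator()
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         accelerator.free_memory()   # drop references held from a failed attempt
#         ...                         # build dataloaders/model with `batch_size`
#
#     train()  # called with no args; the decorator halves batch_size on CUDA OOM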
303
"""simple docstring""" import math UpperCamelCase_ = 10 UpperCamelCase_ = 7 UpperCamelCase_ = BALLS_PER_COLOUR * NUM_COLOURS def UpperCamelCase ( UpperCAmelCase = 20 ) ->str: """simple docstring""" a_ = math.comb(UpperCAmelCase , UpperCAmelCase ) a_ = math.comb(NUM_BALLS - BALLS_PER_COLOUR , UpperCAmelCase ) a_ = NUM_COLOURS * (1 - missing_colour / total) return F'''{result:.9f}''' if __name__ == "__main__": print(solution(20))
303
1
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=1 ) ->List[str]: """simple docstring""" if n_shave_prefix_segments >= 0: return ".".join(path.split("." )[n_shave_prefix_segments:] ) else: return ".".join(path.split("." )[:n_shave_prefix_segments] ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=0 ) ->Dict: """simple docstring""" a_ = [] for old_item in old_list: a_ = old_item.replace("in_layers.0" , "norm1" ) a_ = new_item.replace("in_layers.2" , "conv1" ) a_ = new_item.replace("out_layers.0" , "norm2" ) a_ = new_item.replace("out_layers.3" , "conv2" ) a_ = new_item.replace("emb_layers.1" , "time_emb_proj" ) a_ = new_item.replace("skip_connection" , "conv_shortcut" ) a_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase ) mapping.append({"old": old_item, "new": new_item} ) return mapping def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=0 ) ->Tuple: """simple docstring""" a_ = [] for old_item in old_list: a_ = old_item a_ = new_item.replace("norm.weight" , "group_norm.weight" ) a_ = new_item.replace("norm.bias" , "group_norm.bias" ) a_ = new_item.replace("proj_out.weight" , "proj_attn.weight" ) a_ = new_item.replace("proj_out.bias" , "proj_attn.bias" ) a_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase ) mapping.append({"old": old_item, "new": new_item} ) return mapping def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None ) ->List[Any]: """simple docstring""" assert isinstance(UpperCAmelCase , UpperCAmelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): a_ = old_checkpoint[path] a_ = old_tensor.shape[0] // 3 a_ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) a_ = old_tensor.shape[0] // config["num_head_channels"] // 3 a_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) a_ , a_ , a_ = old_tensor.split(channels // num_heads , dim=1 ) a_ = query.reshape(UpperCAmelCase ) a_ = key.reshape(UpperCAmelCase ) a_ = value.reshape(UpperCAmelCase ) for path in paths: a_ = path["new"] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here a_ = new_path.replace("middle_block.0" , "mid_block.resnets.0" ) a_ = new_path.replace("middle_block.1" , "mid_block.attentions.0" ) a_ = new_path.replace("middle_block.2" , "mid_block.resnets.1" ) if additional_replacements is not None: for replacement in additional_replacements: a_ = new_path.replace(replacement["old"] , replacement["new"] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: a_ = old_checkpoint[path["old"]][:, :, 0] else: a_ = old_checkpoint[path["old"]] def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->int: """simple docstring""" a_ = {} a_ = checkpoint["time_embed.0.weight"] a_ = checkpoint["time_embed.0.bias"] a_ = checkpoint["time_embed.2.weight"] a_ = checkpoint["time_embed.2.bias"] a_ = checkpoint["input_blocks.0.0.weight"] a_ = checkpoint["input_blocks.0.0.bias"] a_ = checkpoint["out.0.weight"] a_ = checkpoint["out.0.bias"] a_ = checkpoint["out.2.weight"] a_ = checkpoint["out.2.bias"] # Retrieves the keys for the input blocks only a_ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "input_blocks" in layer} ) a_ = { layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key] for layer_id in range(UpperCAmelCase ) } # Retrieves the keys for the middle blocks only a_ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} ) a_ = { layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key] for layer_id in range(UpperCAmelCase ) } # Retrieves the keys for the output blocks only a_ = len({".".join(layer.split("." 
)[:2] ) for layer in checkpoint if "output_blocks" in layer} ) a_ = { layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key] for layer_id in range(UpperCAmelCase ) } for i in range(1 , UpperCAmelCase ): a_ = (i - 1) // (config["num_res_blocks"] + 1) a_ = (i - 1) % (config["num_res_blocks"] + 1) a_ = [key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key] a_ = [key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key] if F'''input_blocks.{i}.0.op.weight''' in checkpoint: a_ = checkpoint[ F'''input_blocks.{i}.0.op.weight''' ] a_ = checkpoint[ F'''input_blocks.{i}.0.op.bias''' ] continue a_ = renew_resnet_paths(UpperCAmelCase ) a_ = {"old": F'''input_blocks.{i}.0''', "new": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''} a_ = {"old": "resnets.2.op", "new": "downsamplers.0.op"} assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path, resnet_op] , config=UpperCAmelCase ) if len(UpperCAmelCase ): a_ = renew_attention_paths(UpperCAmelCase ) a_ = { "old": F'''input_blocks.{i}.1''', "new": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''', } a_ = { F'''input_blocks.{i}.1.qkv.bias''': { "key": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', "query": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', "value": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''input_blocks.{i}.1.qkv.weight''': { "key": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', "query": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', "value": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase , ) a_ = middle_blocks[0] a_ = middle_blocks[1] a_ = middle_blocks[2] a_ = renew_resnet_paths(UpperCAmelCase ) assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase ) a_ = renew_resnet_paths(UpperCAmelCase ) assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase ) a_ = renew_attention_paths(UpperCAmelCase ) a_ = { "middle_block.1.qkv.bias": { "key": "mid_block.attentions.0.key.bias", "query": "mid_block.attentions.0.query.bias", "value": "mid_block.attentions.0.value.bias", }, "middle_block.1.qkv.weight": { "key": "mid_block.attentions.0.key.weight", "query": "mid_block.attentions.0.query.weight", "value": "mid_block.attentions.0.value.weight", }, } assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase ) for i in range(UpperCAmelCase ): a_ = i // (config["num_res_blocks"] + 1) a_ = i % (config["num_res_blocks"] + 1) a_ = [shave_segments(UpperCAmelCase , 2 ) for name in output_blocks[i]] a_ = {} for layer in output_block_layers: a_ , a_ = layer.split("." 
)[0], shave_segments(UpperCAmelCase , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(UpperCAmelCase ) else: a_ = [layer_name] if len(UpperCAmelCase ) > 1: a_ = [key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key] a_ = [key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key] a_ = renew_resnet_paths(UpperCAmelCase ) a_ = renew_resnet_paths(UpperCAmelCase ) a_ = {"old": F'''output_blocks.{i}.0''', "new": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''} assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , config=UpperCAmelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): a_ = list(output_block_list.values() ).index(["conv.weight", "conv.bias"] ) a_ = checkpoint[ F'''output_blocks.{i}.{index}.conv.weight''' ] a_ = checkpoint[ F'''output_blocks.{i}.{index}.conv.bias''' ] # Clear attentions as they have been attributed above. if len(UpperCAmelCase ) == 2: a_ = [] if len(UpperCAmelCase ): a_ = renew_attention_paths(UpperCAmelCase ) a_ = { "old": F'''output_blocks.{i}.1''', "new": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''', } a_ = { F'''output_blocks.{i}.1.qkv.bias''': { "key": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', "query": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', "value": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''output_blocks.{i}.1.qkv.weight''': { "key": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', "query": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', "value": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("qkv" in key for key in attentions ) else None , config=UpperCAmelCase , ) else: a_ = renew_resnet_paths(UpperCAmelCase , n_shave_prefix_segments=1 ) for path in resnet_0_paths: a_ = ".".join(["output_blocks", str(UpperCAmelCase ), path["old"]] ) a_ = ".".join(["up_blocks", str(UpperCAmelCase ), "resnets", str(UpperCAmelCase ), path["new"]] ) a_ = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the architecture.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') UpperCamelCase_ = parser.parse_args() UpperCamelCase_ = torch.load(args.checkpoint_path) with open(args.config_file) as f: UpperCamelCase_ = json.loads(f.read()) UpperCamelCase_ = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] UpperCamelCase_ = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: UpperCamelCase_ = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1])) UpperCamelCase_ = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1])) UpperCamelCase_ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
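# Added usage note (not part of the original script); the paths are placeholders
# and the filename is the upstream diffusers one:
#
#     python convert_ldm_original_checkpoint_to_diffusers.py \
#         --checkpoint_path /path/to/model.ckpt \
#         --config_file /path/to/config.json \
#         --dump_path /path/to/output
#
# Only these three flags are defined by the argparse block above.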
303
"""simple docstring""" import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params UpperCamelCase_ = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ['memory_attention', 'encoder_attn'], ['attention', 'attn'], ['/', '.'], ['.LayerNorm.gamma', '_layer_norm.weight'], ['.LayerNorm.beta', '_layer_norm.bias'], ['r.layer_', 'r.layers.'], ['output_proj', 'out_proj'], ['ffn.dense_1.', 'fc2.'], ['ffn.dense.', 'fc1.'], ['ffn_layer_norm', 'final_layer_norm'], ['kernel', 'weight'], ['encoder_layer_norm.', 'encoder.layer_norm.'], ['decoder_layer_norm.', 'decoder.layer_norm.'], ['embeddings.weights', 'shared.weight'], ] def UpperCamelCase ( UpperCAmelCase ) ->Optional[Any]: """simple docstring""" for pegasus_name, hf_name in PATTERNS: a_ = k.replace(UpperCAmelCase , UpperCAmelCase ) return k def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->PegasusForConditionalGeneration: """simple docstring""" a_ = DEFAULTS.copy() cfg_kwargs.update(UpperCAmelCase ) a_ = PegasusConfig(**UpperCAmelCase ) a_ = PegasusForConditionalGeneration(UpperCAmelCase ) a_ = torch_model.model.state_dict() a_ = {} for k, v in tf_weights.items(): a_ = rename_state_dict_key(UpperCAmelCase ) if new_k not in sd: raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' ) if "dense" in k or "proj" in new_k: a_ = v.T a_ = torch.tensor(UpperCAmelCase , dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}''' # make sure embedding.padding_idx is respected a_ = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1] ) a_ = mapping["shared.weight"] a_ = mapping["shared.weight"] a_ = {k: torch.zeros_like(UpperCAmelCase ) for k, v in sd.items() if k.endswith("bias" ) and k not in mapping} mapping.update(**UpperCAmelCase ) a_ , a_ = torch_model.model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase ) a_ = [ k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"] ] assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}''' assert extra == [], F'''no matches found for the following tf keys {extra}''' return torch_model def UpperCamelCase ( UpperCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) ->Dict: """simple docstring""" a_ = tf.train.list_variables(UpperCAmelCase ) a_ = {} a_ = ["Adafactor", "global_step"] for name, shape in tqdm(UpperCAmelCase , desc="converting tf checkpoint to dict" ): a_ = any(pat in name for pat in ignore_name ) if skip_key: continue a_ = tf.train.load_variable(UpperCAmelCase , UpperCAmelCase ) a_ = array return tf_weights def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" a_ = Path(UpperCAmelCase ).parent.name a_ = task_specific_params[F'''summarization_{dataset}''']["max_position_embeddings"] a_ = PegasusTokenizer.from_pretrained("sshleifer/pegasus" , model_max_length=UpperCAmelCase ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(UpperCAmelCase ) # convert model a_ = get_tf_weights_as_numpy(UpperCAmelCase ) a_ = task_specific_params[F'''summarization_{dataset}'''] if dataset == "large": a_ = task_specific_params a_ = convert_pegasus(UpperCAmelCase 
, UpperCAmelCase ) torch_model.save_pretrained(UpperCAmelCase ) a_ = torch_model.state_dict() sd.pop("model.decoder.embed_positions.weight" ) sd.pop("model.encoder.embed_positions.weight" ) torch.save(UpperCAmelCase , Path(UpperCAmelCase ) / "pytorch_model.bin" ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.') UpperCamelCase_ = parser.parse_args() if args.save_dir is None: UpperCamelCase_ = Path(args.tf_ckpt_path).parent.name UpperCamelCase_ = os.path.join('pegasus', dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
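# Added usage note (not part of the original script); the checkpoint path is a
# placeholder and the filename is the upstream transformers one:
#
#     python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 save_dir
#
# Both arguments are positional, matching the argparse block above; when
# save_dir is None the script derives `pegasus/<dataset>` from the checkpoint path.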
303
1
"""simple docstring""" def UpperCamelCase ( ) ->list[list[int]]: """simple docstring""" return [list(range(1_000 - i , -1_000 - i , -1 ) ) for i in range(1_000 )] UpperCamelCase_ = generate_large_matrix() UpperCamelCase_ = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def UpperCamelCase ( UpperCAmelCase ) ->None: """simple docstring""" assert all(row == sorted(UpperCAmelCase , reverse=UpperCAmelCase ) for row in grid ) assert all(list(UpperCAmelCase ) == sorted(UpperCAmelCase , reverse=UpperCAmelCase ) for col in zip(*UpperCAmelCase ) ) def UpperCamelCase ( UpperCAmelCase ) ->int: """simple docstring""" a_ = 0 a_ = len(UpperCAmelCase ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: a_ = (left + right) // 2 a_ = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: a_ = mid + 1 else: a_ = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(UpperCAmelCase ) def UpperCamelCase ( UpperCAmelCase ) ->int: """simple docstring""" a_ = 0 a_ = len(grid[0] ) for i in range(len(UpperCAmelCase ) ): a_ = find_negative_index(grid[i][:bound] ) total += bound return (len(UpperCAmelCase ) * len(grid[0] )) - total def UpperCamelCase ( UpperCAmelCase ) ->int: """simple docstring""" return len([number for row in grid for number in row if number < 0] ) def UpperCamelCase ( UpperCAmelCase ) ->int: """simple docstring""" a_ = 0 for row in grid: for i, number in enumerate(UpperCAmelCase ): if number < 0: total += len(UpperCAmelCase ) - i break return total def UpperCamelCase ( ) ->None: """simple docstring""" from timeit import timeit print("Running benchmarks" ) a_ = ( "from __main__ import count_negatives_binary_search, " "count_negatives_brute_force, count_negatives_brute_force_with_break, grid" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): a_ = timeit(F'''{func}(grid=grid)''' , setup=UpperCAmelCase , number=500 ) print(F'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
303
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class snake_case : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=50 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=None , ) ->Dict: a_ = parent a_ = batch_size a_ = seq_length a_ = is_training a_ = use_input_mask a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = initializer_range a_ = use_labels a_ = scope def UpperCAmelCase__ ( self) ->Any: a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a_ = None if self.use_input_mask: a_ = random_attention_mask([self.batch_size, self.seq_length]) if self.use_labels: a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a_ = self.get_config() return config, input_ids, input_mask, token_labels def UpperCAmelCase__ ( self) ->Optional[Any]: return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self) ->List[str]: ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) = self.prepare_config_and_inputs() a_ = True a_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->str: a_ = BertGenerationEncoder(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase) a_ = model(__UpperCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->Union[str, Any]: a_ = True a_ = BertGenerationEncoder(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , 
encoder_hidden_states=__UpperCAmelCase , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->List[str]: a_ = True a_ = True a_ = BertGenerationDecoder(config=__UpperCAmelCase).to(__UpperCAmelCase).eval() # first forward pass a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , ) a_ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids a_ = ids_tensor((self.batch_size, 3) , config.vocab_size) a_ = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and a_ = torch.cat([input_ids, next_tokens] , dim=-1) a_ = torch.cat([input_mask, next_mask] , dim=-1) a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0] a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0] # select random slice a_ = ids_tensor((1,) , output_from_past.shape[-1]).item() a_ = output_from_no_past[:, -3:, random_slice_idx].detach() a_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , ) ->Tuple: a_ = BertGenerationDecoder(__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCAmelCase__ ( self) ->str: a_ , a_ , a_ , a_ = self.prepare_config_and_inputs() a_ = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : List[str] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () a_ : Optional[int] = (BertGenerationDecoder,) if is_torch_available() else () a_ : List[Any] = ( {"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder} if is_torch_available() else {} ) def UpperCAmelCase__ ( self) ->List[Any]: a_ = BertGenerationEncoderTester(self) a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37) def UpperCAmelCase__ ( self) ->Optional[Any]: self.config_tester.run_common_tests() def UpperCAmelCase__ ( self) ->Tuple: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Tuple: a_ , a_ , a_ , a_ = self.model_tester.prepare_config_and_inputs() a_ = "bert" self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) def UpperCAmelCase__ ( self) ->int: a_ = 
self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->List[str]: a_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Optional[int]: # This regression test was failing with PyTorch < 1.3 ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() a_ = None self.model_tester.create_and_check_model_as_decoder( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) def UpperCAmelCase__ ( self) ->List[Any]: a_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase) @slow def UpperCAmelCase__ ( self) ->str: a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") self.assertIsNotNone(__UpperCAmelCase) @require_torch class snake_case ( unittest.TestCase ): @slow def UpperCAmelCase__ ( self) ->int: a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]]) with torch.no_grad(): a_ = model(__UpperCAmelCase)[0] a_ = torch.Size([1, 8, 10_24]) self.assertEqual(output.shape , __UpperCAmelCase) a_ = torch.tensor( [[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4)) @require_torch class snake_case ( unittest.TestCase ): @slow def UpperCAmelCase__ ( self) ->List[str]: a_ = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]]) with torch.no_grad(): a_ = model(__UpperCAmelCase)[0] a_ = torch.Size([1, 8, 5_03_58]) self.assertEqual(output.shape , __UpperCAmelCase) a_ = torch.tensor( [[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
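# Added usage sketch (not part of the test file): the encoder/decoder pair
# exercised above is typically combined via EncoderDecoderModel; this mirrors
# the documented pattern and is illustrative only.
#
#     from transformers import BertGenerationDecoder, BertGenerationEncoder
#
#     encoder = BertGenerationEncoder.from_pretrained(
#         "google/bert_for_seq_generation_L-24_bbc_encoder")
#     decoder = BertGenerationDecoder.from_pretrained(
#         "google/bert_for_seq_generation_L-24_bbc_encoder",
#         add_cross_attention=True, is_decoder=True)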
303
1
"""simple docstring""" import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class snake_case ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : str = KandinskyVaaPriorPipeline a_ : int = ["""prompt"""] a_ : int = ["""prompt""", """negative_prompt"""] a_ : Union[str, Any] = [ """num_images_per_prompt""", """generator""", """num_inference_steps""", """latents""", """negative_prompt""", """guidance_scale""", """output_type""", """return_dict""", ] a_ : Union[str, Any] = False @property def UpperCAmelCase__ ( self) ->List[str]: return 32 @property def UpperCAmelCase__ ( self) ->Optional[int]: return 32 @property def UpperCAmelCase__ ( self) ->List[str]: return self.time_input_dim @property def UpperCAmelCase__ ( self) ->str: return self.time_input_dim * 4 @property def UpperCAmelCase__ ( self) ->int: return 1_00 @property def UpperCAmelCase__ ( self) ->str: a_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def UpperCAmelCase__ ( self) ->str: torch.manual_seed(0) a_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModelWithProjection(__UpperCAmelCase) @property def UpperCAmelCase__ ( self) ->Tuple: torch.manual_seed(0) a_ = { "num_attention_heads": 2, "attention_head_dim": 12, "embedding_dim": self.text_embedder_hidden_size, "num_layers": 1, } a_ = PriorTransformer(**__UpperCAmelCase) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 a_ = nn.Parameter(torch.ones(model.clip_std.shape)) return model @property def UpperCAmelCase__ ( self) ->List[Any]: torch.manual_seed(0) a_ = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=2_24 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) a_ = CLIPVisionModelWithProjection(__UpperCAmelCase) return model @property def UpperCAmelCase__ ( self) ->int: a_ = CLIPImageProcessor( crop_size=2_24 , do_center_crop=__UpperCAmelCase , do_normalize=__UpperCAmelCase , do_resize=__UpperCAmelCase , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=2_24 , ) return image_processor def UpperCAmelCase__ ( self) ->Dict: a_ = self.dummy_prior a_ = self.dummy_image_encoder a_ = self.dummy_text_encoder a_ = self.dummy_tokenizer a_ = self.dummy_image_processor a_ = UnCLIPScheduler( variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=10_00 , clip_sample=__UpperCAmelCase , clip_sample_range=10.0 , ) a_ = { "prior": prior, "image_encoder": image_encoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "scheduler": scheduler, "image_processor": image_processor, } return components def UpperCAmelCase__ ( self , 
__UpperCAmelCase , __UpperCAmelCase=0) ->List[str]: if str(__UpperCAmelCase).startswith("mps"): a_ = torch.manual_seed(__UpperCAmelCase) else: a_ = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) a_ = { "prompt": "horse", "generator": generator, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def UpperCAmelCase__ ( self) ->Optional[int]: a_ = "cpu" a_ = self.get_dummy_components() a_ = self.pipeline_class(**__UpperCAmelCase) a_ = pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) a_ = pipe(**self.get_dummy_inputs(__UpperCAmelCase)) a_ = output.image_embeds a_ = pipe( **self.get_dummy_inputs(__UpperCAmelCase) , return_dict=__UpperCAmelCase , )[0] a_ = image[0, -10:] a_ = image_from_tuple[0, -10:] assert image.shape == (1, 32) a_ = np.array( [-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2 @skip_mps def UpperCAmelCase__ ( self) ->Union[str, Any]: a_ = torch_device == "cpu" a_ = True a_ = False self._test_inference_batch_single_identical( test_max_difference=__UpperCAmelCase , relax_max_difference=__UpperCAmelCase , test_mean_pixel_difference=__UpperCAmelCase , ) @skip_mps def UpperCAmelCase__ ( self) ->Dict: a_ = torch_device == "cpu" a_ = False self._test_attention_slicing_forward_pass( test_max_difference=__UpperCAmelCase , test_mean_pixel_difference=__UpperCAmelCase , )
303
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger(__name__) def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" if "resnet-50" in model_name: a_ = ResNetConfig.from_pretrained("microsoft/resnet-50" ) elif "resnet-101" in model_name: a_ = ResNetConfig.from_pretrained("microsoft/resnet-101" ) else: raise ValueError("Model name should include either resnet50 or resnet101" ) a_ = DetrConfig(use_timm_backbone=UpperCAmelCase , backbone_config=UpperCAmelCase ) # set label attributes a_ = "panoptic" in model_name if is_panoptic: a_ = 250 else: a_ = 91 a_ = "huggingface/label-files" a_ = "coco-detection-id2label.json" a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) ) a_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()} a_ = idalabel a_ = {v: k for k, v in idalabel.items()} return config, is_panoptic def UpperCamelCase ( UpperCAmelCase ) ->List[str]: """simple docstring""" a_ = [] # stem # fmt: off rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") ) rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") ) rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") ) rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") ) rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") ) # stages for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): # shortcut if layer_idx == 0: rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''', ) ) # 3 convs for i in range(3 ): rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''', 
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''', ) ) # fmt: on for i in range(config.encoder_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''', ) ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') ) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''', ) ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append( 
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ] ) return rename_keys def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" a_ = state_dict.pop(UpperCAmelCase ) a_ = val def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False ) ->Optional[Any]: """simple docstring""" a_ = "" if is_panoptic: a_ = "detr." # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict a_ = in_proj_weight[:256, :] a_ = in_proj_bias[:256] a_ = in_proj_weight[256:512, :] a_ = in_proj_bias[256:512] a_ = in_proj_weight[-256:, :] a_ = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' ) a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict a_ = in_proj_weight[:256, :] a_ = in_proj_bias[:256] a_ = in_proj_weight[256:512, :] a_ = in_proj_bias[256:512] a_ = in_proj_weight[-256:, :] a_ = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention a_ = state_dict.pop( F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' ) a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) of cross-attention to the state dict a_ = in_proj_weight_cross_attn[:256, :] a_ = in_proj_bias_cross_attn[:256] a_ = in_proj_weight_cross_attn[256:512, :] a_ = in_proj_bias_cross_attn[256:512] a_ = in_proj_weight_cross_attn[-256:, :] a_ = in_proj_bias_cross_attn[-256:] def UpperCamelCase ( ) ->Dict: """simple docstring""" a_ = 
"http://images.cocodataset.org/val2017/000000039769.jpg" a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) return im @torch.no_grad() def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=False ) ->List[str]: """simple docstring""" a_ , a_ = get_detr_config(UpperCAmelCase ) # load original model from torch hub a_ = { "detr-resnet-50": "detr_resnet50", "detr-resnet-101": "detr_resnet101", } logger.info(F'''Converting model {model_name}...''' ) a_ = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=UpperCAmelCase ).eval() a_ = detr.state_dict() # rename keys for src, dest in create_rename_keys(UpperCAmelCase ): if is_panoptic: a_ = "detr." + src rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # query, key and value matrices need special treatment read_in_q_k_v(UpperCAmelCase , is_panoptic=UpperCAmelCase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them a_ = "detr.model." if is_panoptic else "model." for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): a_ = state_dict.pop(UpperCAmelCase ) a_ = val elif "class_labels_classifier" in key or "bbox_predictor" in key: a_ = state_dict.pop(UpperCAmelCase ) a_ = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: a_ = state_dict.pop(UpperCAmelCase ) a_ = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): a_ = state_dict.pop(UpperCAmelCase ) a_ = val # finally, create HuggingFace model and load state dict a_ = DetrForSegmentation(UpperCAmelCase ) if is_panoptic else DetrForObjectDetection(UpperCAmelCase ) model.load_state_dict(UpperCAmelCase ) model.eval() # verify our conversion on an image a_ = "coco_panoptic" if is_panoptic else "coco_detection" a_ = DetrImageProcessor(format=UpperCAmelCase ) a_ = processor(images=prepare_img() , return_tensors="pt" ) a_ = encoding["pixel_values"] a_ = detr(UpperCAmelCase ) a_ = model(UpperCAmelCase ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase ) model.save_pretrained(UpperCAmelCase ) processor.save_pretrained(UpperCAmelCase ) if push_to_hub: # Upload model and image processor to the hub logger.info("Uploading PyTorch model and image processor to the hub..." ) model.push_to_hub(F'''nielsr/{model_name}''' ) processor.push_to_hub(F'''nielsr/{model_name}''' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument( '--model_name', default='detr-resnet-50', type=str, choices=['detr-resnet-50', 'detr-resnet-101'], help='Name of the DETR model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' 
) parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.') UpperCamelCase_ = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
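# Added usage note (not part of the original script); the output folder is a
# placeholder and the filename is the upstream transformers one:
#
#     python convert_detr_to_pytorch.py \
#         --model_name detr-resnet-50 \
#         --pytorch_dump_folder_path ./detr-resnet-50 \
#         --push_to_hub
#
# Only these three flags exist, per the argparse block above.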
303
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger(__name__) def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" if "resnet-50" in model_name: a_ = ResNetConfig.from_pretrained("microsoft/resnet-50" ) elif "resnet-101" in model_name: a_ = ResNetConfig.from_pretrained("microsoft/resnet-101" ) else: raise ValueError("Model name should include either resnet50 or resnet101" ) a_ = DetrConfig(use_timm_backbone=UpperCAmelCase , backbone_config=UpperCAmelCase ) # set label attributes a_ = "panoptic" in model_name if is_panoptic: a_ = 250 else: a_ = 91 a_ = "huggingface/label-files" a_ = "coco-detection-id2label.json" a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) ) a_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()} a_ = idalabel a_ = {v: k for k, v in idalabel.items()} return config, is_panoptic def UpperCamelCase ( UpperCAmelCase ) ->List[str]: """simple docstring""" a_ = [] # stem # fmt: off rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") ) rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") ) rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") ) rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") ) rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") ) # stages for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): # shortcut if layer_idx == 0: rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''', ) ) # 3 convs for i in range(3 ): rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''', 
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''', ) ) # fmt: on for i in range(config.encoder_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''', ) ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') ) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''', ) ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append( 
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ] ) return rename_keys def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" a_ = state_dict.pop(UpperCAmelCase ) a_ = val def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False ) ->Optional[Any]: """simple docstring""" a_ = "" if is_panoptic: a_ = "detr." # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict a_ = in_proj_weight[:256, :] a_ = in_proj_bias[:256] a_ = in_proj_weight[256:512, :] a_ = in_proj_bias[256:512] a_ = in_proj_weight[-256:, :] a_ = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' ) a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict a_ = in_proj_weight[:256, :] a_ = in_proj_bias[:256] a_ = in_proj_weight[256:512, :] a_ = in_proj_bias[256:512] a_ = in_proj_weight[-256:, :] a_ = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention a_ = state_dict.pop( F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' ) a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) of cross-attention to the state dict a_ = in_proj_weight_cross_attn[:256, :] a_ = in_proj_bias_cross_attn[:256] a_ = in_proj_weight_cross_attn[256:512, :] a_ = in_proj_bias_cross_attn[256:512] a_ = in_proj_weight_cross_attn[-256:, :] a_ = in_proj_bias_cross_attn[-256:] def UpperCamelCase ( ) ->Dict: """simple docstring""" a_ = 
"http://images.cocodataset.org/val2017/000000039769.jpg" a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) return im @torch.no_grad() def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=False ) ->List[str]: """simple docstring""" a_ , a_ = get_detr_config(UpperCAmelCase ) # load original model from torch hub a_ = { "detr-resnet-50": "detr_resnet50", "detr-resnet-101": "detr_resnet101", } logger.info(F'''Converting model {model_name}...''' ) a_ = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=UpperCAmelCase ).eval() a_ = detr.state_dict() # rename keys for src, dest in create_rename_keys(UpperCAmelCase ): if is_panoptic: a_ = "detr." + src rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # query, key and value matrices need special treatment read_in_q_k_v(UpperCAmelCase , is_panoptic=UpperCAmelCase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them a_ = "detr.model." if is_panoptic else "model." for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): a_ = state_dict.pop(UpperCAmelCase ) a_ = val elif "class_labels_classifier" in key or "bbox_predictor" in key: a_ = state_dict.pop(UpperCAmelCase ) a_ = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: a_ = state_dict.pop(UpperCAmelCase ) a_ = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): a_ = state_dict.pop(UpperCAmelCase ) a_ = val # finally, create HuggingFace model and load state dict a_ = DetrForSegmentation(UpperCAmelCase ) if is_panoptic else DetrForObjectDetection(UpperCAmelCase ) model.load_state_dict(UpperCAmelCase ) model.eval() # verify our conversion on an image a_ = "coco_panoptic" if is_panoptic else "coco_detection" a_ = DetrImageProcessor(format=UpperCAmelCase ) a_ = processor(images=prepare_img() , return_tensors="pt" ) a_ = encoding["pixel_values"] a_ = detr(UpperCAmelCase ) a_ = model(UpperCAmelCase ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase ) model.save_pretrained(UpperCAmelCase ) processor.save_pretrained(UpperCAmelCase ) if push_to_hub: # Upload model and image processor to the hub logger.info("Uploading PyTorch model and image processor to the hub..." ) model.push_to_hub(F'''nielsr/{model_name}''' ) processor.push_to_hub(F'''nielsr/{model_name}''' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument( '--model_name', default='detr-resnet-50', type=str, choices=['detr-resnet-50', 'detr-resnet-101'], help='Name of the DETR model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' 
) parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.') UpperCamelCase_ = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
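# Usage sketch (not part of the conversion script): running detection with a converted
# checkpoint. "facebook/detr-resnet-50" is the officially published DETR id; point
# from_pretrained at your local dump folder instead if you converted the weights yourself.
import requests
import torch
from PIL import Image
from transformers import DetrForObjectDetection, DetrImageProcessor

processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# convert logits + normalized boxes back to labeled pixel-space detections
results = processor.post_process_object_detection(
    outputs, threshold=0.9, target_sizes=torch.tensor([image.size[::-1]])
)[0]
for score, label in zip(results["scores"], results["labels"]):
    print(model.config.id2label[label.item()], round(score.item(), 3))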
303
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def UpperCamelCase ( UpperCAmelCase ) ->Tuple: """simple docstring""" a_ = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2] a_ = True if "large" in model_name or "huge" in model_name else False a_ = True if "large" in model_name or "huge" in model_name else False a_ = True if "large" in model_name or "huge" in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: a_ = [3, 3, 3, 3] a_ = [5, 5, 5, 5] elif "fl4" in model_name: a_ = [4, 4, 4, 4] a_ = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: a_ = [3, 3, 3, 3] if "lrf" in model_name: a_ = [3, 3, 3, 3] else: a_ = [2, 2, 2, 2] if "tiny" in model_name: a_ = 96 elif "small" in model_name: a_ = 96 elif "base" in model_name: a_ = 128 elif "large" in model_name: a_ = 192 elif "xlarge" in model_name: a_ = 256 elif "huge" in model_name: a_ = 352 # set label information a_ = "huggingface/label-files" if "large" in model_name or "huge" in model_name: a_ = "imagenet-22k-id2label.json" else: a_ = "imagenet-1k-id2label.json" a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) ) a_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()} a_ = {v: k for k, v in idalabel.items()} a_ = FocalNetConfig( embed_dim=UpperCAmelCase , depths=UpperCAmelCase , focal_levels=UpperCAmelCase , focal_windows=UpperCAmelCase , use_conv_embed=UpperCAmelCase , idalabel=UpperCAmelCase , labelaid=UpperCAmelCase , use_post_layernorm=UpperCAmelCase , use_layerscale=UpperCAmelCase , ) return config def UpperCamelCase ( UpperCAmelCase ) ->Any: """simple docstring""" if "patch_embed.proj" in name: a_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: a_ = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: a_ = "encoder." + name if "encoder.layers" in name: a_ = name.replace("encoder.layers" , "encoder.stages" ) if "downsample.proj" in name: a_ = name.replace("downsample.proj" , "downsample.projection" ) if "blocks" in name: a_ = name.replace("blocks" , "layers" ) if "modulation.f.weight" in name or "modulation.f.bias" in name: a_ = name.replace("modulation.f" , "modulation.projection_in" ) if "modulation.h.weight" in name or "modulation.h.bias" in name: a_ = name.replace("modulation.h" , "modulation.projection_context" ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: a_ = name.replace("modulation.proj" , "modulation.projection_out" ) if name == "norm.weight": a_ = "layernorm.weight" if name == "norm.bias": a_ = "layernorm.bias" if "head" in name: a_ = name.replace("head" , "classifier" ) else: a_ = "focalnet." 
+ name return name def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) ->Dict: """simple docstring""" a_ = { "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth", "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth", "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth", "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth", "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth", "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth", "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth", "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth", "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth", "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth", } # fmt: on a_ = model_name_to_url[model_name] print("Checkpoint URL: " , UpperCAmelCase ) a_ = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location="cpu" )["model"] # rename keys for key in state_dict.copy().keys(): a_ = state_dict.pop(UpperCAmelCase ) a_ = val a_ = get_focalnet_config(UpperCAmelCase ) a_ = FocalNetForImageClassification(UpperCAmelCase ) model.eval() # load state dict model.load_state_dict(UpperCAmelCase ) # verify conversion a_ = "http://images.cocodataset.org/val2017/000000039769.jpg" a_ = BitImageProcessor( do_resize=UpperCAmelCase , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase , crop_size=224 , do_normalize=UpperCAmelCase , image_mean=UpperCAmelCase , image_std=UpperCAmelCase , ) a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) a_ = processor(images=UpperCAmelCase , return_tensors="pt" ) a_ = transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) a_ = image_transforms(UpperCAmelCase ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , UpperCAmelCase , atol=1E-4 ) a_ = model(**UpperCAmelCase ) a_ = outputs.logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) print("First values of logits:" , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": a_ = torch.tensor([0.2166, -0.4368, 0.2191] ) elif model_name == "focalnet-tiny-lrf": a_ = torch.tensor([1.1669, 0.0125, -0.1695] ) elif model_name == "focalnet-small": a_ = torch.tensor([0.4917, -0.0430, 0.1341] ) elif model_name == "focalnet-small-lrf": a_ = torch.tensor([-0.2588, -0.5342, -0.2331] ) elif model_name == "focalnet-base": a_ = torch.tensor([-0.1655, -0.4090, -0.1730] ) elif model_name == "focalnet-base-lrf": a_ = torch.tensor([0.5306, -0.0483, -0.3928] ) assert torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1E-4 ) print("Looks ok!" 
) if pytorch_dump_folder_path is not None: print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCAmelCase ) processor.save_pretrained(UpperCAmelCase ) if push_to_hub: print(F'''Pushing model and processor of {model_name} to the hub...''' ) model.push_to_hub(F'''{model_name}''' ) processor.push_to_hub(F'''{model_name}''' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='focalnet-tiny', type=str, help='Name of the FocalNet model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub.', ) UpperCamelCase_ = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
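# Usage sketch: image classification with a converted FocalNet checkpoint. The hub id
# "microsoft/focalnet-tiny" is an assumption about where these conversions are published.
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, FocalNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])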
303
1
"""simple docstring""" import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging UpperCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Optional[int]: super().__init__() if safety_checker is None: logger.warning( F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure''' " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 .") self.register_modules( speech_model=__UpperCAmelCase , speech_processor=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , ) def UpperCAmelCase__ ( self , __UpperCAmelCase = "auto") ->int: if slice_size == "auto": a_ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Optional[int]: self.enable_attention_slicing(__UpperCAmelCase) @torch.no_grad() def __call__( self , __UpperCAmelCase , __UpperCAmelCase=1_60_00 , __UpperCAmelCase = 5_12 , __UpperCAmelCase = 5_12 , __UpperCAmelCase = 50 , __UpperCAmelCase = 7.5 , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = 1 , **__UpperCAmelCase , ) ->int: a_ = self.speech_processor.feature_extractor( __UpperCAmelCase , return_tensors="pt" , sampling_rate=__UpperCAmelCase).input_features.to(self.device) a_ = self.speech_model.generate(__UpperCAmelCase , max_length=48_00_00) a_ = self.speech_processor.tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , normalize=__UpperCAmelCase)[ 0 ] if isinstance(__UpperCAmelCase , __UpperCAmelCase): a_ = 1 elif isinstance(__UpperCAmelCase , __UpperCAmelCase): a_ = len(__UpperCAmelCase) else: raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase)}''') if height % 8 != 0 or width % 8 != 0: raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''') if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__UpperCAmelCase , __UpperCAmelCase) or callback_steps <= 0) ): raise ValueError( F'''`callback_steps` has to be a positive integer but is 
{callback_steps} of type''' F''' {type(__UpperCAmelCase)}.''') # get prompt text embeddings a_ = self.tokenizer( __UpperCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , ) a_ = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: a_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" F''' {self.tokenizer.model_max_length} tokens: {removed_text}''') a_ = text_input_ids[:, : self.tokenizer.model_max_length] a_ = self.text_encoder(text_input_ids.to(self.device))[0] # duplicate text embeddings for each generation per prompt, using mps friendly method a_ , a_ , a_ = text_embeddings.shape a_ = text_embeddings.repeat(1 , __UpperCAmelCase , 1) a_ = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCAmelCase , -1) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. a_ = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: a_ = 42 if negative_prompt is None: a_ = [""] * batch_size elif type(__UpperCAmelCase) is not type(__UpperCAmelCase): raise TypeError( F'''`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCAmelCase)} !=''' F''' {type(__UpperCAmelCase)}.''') elif isinstance(__UpperCAmelCase , __UpperCAmelCase): a_ = [negative_prompt] elif batch_size != len(__UpperCAmelCase): raise ValueError( F'''`negative_prompt`: {negative_prompt} has batch size {len(__UpperCAmelCase)}, but `prompt`:''' F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' " the batch size of `prompt`.") else: a_ = negative_prompt a_ = text_input_ids.shape[-1] a_ = self.tokenizer( __UpperCAmelCase , padding="max_length" , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="pt" , ) a_ = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method a_ = uncond_embeddings.shape[1] a_ = uncond_embeddings.repeat(1 , __UpperCAmelCase , 1) a_ = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCAmelCase , -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes a_ = torch.cat([uncond_embeddings, text_embeddings]) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
a_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) a_ = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps a_ = torch.randn(__UpperCAmelCase , generator=__UpperCAmelCase , device="cpu" , dtype=__UpperCAmelCase).to( self.device) else: a_ = torch.randn(__UpperCAmelCase , generator=__UpperCAmelCase , device=self.device , dtype=__UpperCAmelCase) else: if latents.shape != latents_shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''') a_ = latents.to(self.device) # set timesteps self.scheduler.set_timesteps(__UpperCAmelCase) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand a_ = self.scheduler.timesteps.to(self.device) # scale the initial noise by the standard deviation required by the scheduler a_ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] a_ = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) a_ = {} if accepts_eta: a_ = eta for i, t in enumerate(self.progress_bar(__UpperCAmelCase)): # expand the latents if we are doing classifier free guidance a_ = torch.cat([latents] * 2) if do_classifier_free_guidance else latents a_ = self.scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase) # predict the noise residual a_ = self.unet(__UpperCAmelCase , __UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase).sample # perform guidance if do_classifier_free_guidance: a_ , a_ = noise_pred.chunk(2) a_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 a_ = self.scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) a_ = 1 / 0.18_215 * latents a_ = self.vae.decode(__UpperCAmelCase).sample a_ = (image / 2 + 0.5).clamp(0 , 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 a_ = image.cpu().permute(0 , 2 , 3 , 1).float().numpy() if output_type == "pil": a_ = self.numpy_to_pil(__UpperCAmelCase) if not return_dict: return image return StableDiffusionPipelineOutput(images=__UpperCAmelCase , nsfw_content_detected=__UpperCAmelCase)
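# Usage sketch: this class mirrors the diffusers community "speech_to_image_diffusion"
# pipeline (Whisper transcribes the audio, Stable Diffusion renders the transcript).
# The custom_pipeline id and model ids below are assumptions based on that example.
import torch
from datasets import load_dataset
from transformers import WhisperForConditionalGeneration, WhisperProcessor
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
audio_sample = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")[3]

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="speech_to_image_diffusion",
    speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small").to(device),
    speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
).to(device)

output = pipe(audio_sample["audio"]["array"], sampling_rate=audio_sample["audio"]["sampling_rate"])
output.images[0].save("speech_to_image.png")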
303
"""simple docstring""" import os import numpy import onnx def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->List[str]: """simple docstring""" a_ = a.name a_ = b.name a_ = "" a_ = "" a_ = a == b a_ = name_a a_ = name_b return res def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]: """simple docstring""" for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCAmelCase , UpperCAmelCase ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase , UpperCAmelCase ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Dict: """simple docstring""" for n in graph_proto.node: _node_replace_input_with(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int: """simple docstring""" a_ = list(model.graph.initializer ) a_ = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i a_ = inits[i].name a_ = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase ) def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" a_ = os.path.dirname(UpperCAmelCase ) a_ = os.path.basename(UpperCAmelCase ) a_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) ) a_ = list(model.graph.initializer ) a_ = set() a_ = {} a_ = [] a_ = 0 for i in range(len(UpperCAmelCase ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCAmelCase ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCAmelCase ) dup_set.add(UpperCAmelCase ) a_ = inits[j].data_type a_ = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print("unexpected data type: " , UpperCAmelCase ) total_reduced_size += mem_size a_ = inits[i].name a_ = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCAmelCase ) else: a_ = [name_j] ind_to_replace.append((j, i) ) print("total reduced size: " , total_reduced_size / 1_024 / 1_024 / 1_024 , "GB" ) a_ = sorted(UpperCAmelCase ) _remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) a_ = "optimized_" + model_file_name a_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) onnx.save(UpperCAmelCase , UpperCAmelCase ) return new_model
303
1
"""simple docstring""" def UpperCamelCase ( UpperCAmelCase ) ->list: """simple docstring""" a_ = len(UpperCAmelCase ) for _ in range(UpperCAmelCase ): for i in range(_ % 2 , arr_size - 1 , 2 ): if arr[i + 1] < arr[i]: a_ , a_ = arr[i + 1], arr[i] return arr if __name__ == "__main__": UpperCamelCase_ = list(range(10, 0, -1)) print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
303
"""simple docstring""" import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , __UpperCAmelCase = "▁" , __UpperCAmelCase = True , __UpperCAmelCase = "<unk>" , __UpperCAmelCase = "</s>" , __UpperCAmelCase = "<pad>" , ) ->str: a_ = { "pad": {"id": 0, "token": pad_token}, "eos": {"id": 1, "token": eos_token}, "unk": {"id": 2, "token": unk_token}, } a_ = [None] * len(self.special_tokens) for token_dict in self.special_tokens.values(): a_ = token_dict["token"] a_ = Tokenizer(Unigram()) a_ = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}") , " "), normalizers.Lowercase(), ]) a_ = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase), pre_tokenizers.Digits(individual_digits=__UpperCAmelCase), pre_tokenizers.Punctuation(), ]) a_ = decoders.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase) a_ = TemplateProcessing( single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , ) a_ = { "model": "SentencePieceUnigram", "replacement": replacement, "add_prefix_space": add_prefix_space, } super().__init__(__UpperCAmelCase , __UpperCAmelCase) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = 80_00 , __UpperCAmelCase = True , ) ->Optional[Any]: a_ = trainers.UnigramTrainer( vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , ) if isinstance(__UpperCAmelCase , __UpperCAmelCase): a_ = [files] self._tokenizer.train(__UpperCAmelCase , trainer=__UpperCAmelCase) self.add_unk_id() def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = 80_00 , __UpperCAmelCase = True , ) ->int: a_ = trainers.UnigramTrainer( vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , ) self._tokenizer.train_from_iterator(__UpperCAmelCase , trainer=__UpperCAmelCase) self.add_unk_id() def UpperCAmelCase__ ( self) ->Union[str, Any]: a_ = json.loads(self._tokenizer.to_str()) a_ = self.special_tokens["unk"]["id"] a_ = Tokenizer.from_str(json.dumps(__UpperCAmelCase))
303
1
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: UpperCamelCase_ = None UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} UpperCamelCase_ = { 'vocab_file': { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model', }, 'tokenizer_file': { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json', }, } UpperCamelCase_ = { 'albert-base-v1': 512, 'albert-large-v1': 512, 'albert-xlarge-v1': 512, 'albert-xxlarge-v1': 512, 'albert-base-v2': 512, 'albert-large-v2': 512, 'albert-xlarge-v2': 512, 'albert-xxlarge-v2': 512, } UpperCamelCase_ = '▁' class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : Tuple = VOCAB_FILES_NAMES a_ : int = PRETRAINED_VOCAB_FILES_MAP a_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ : Optional[Any] = AlbertTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , **__UpperCAmelCase , ) ->str: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
a_ = ( AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase , normalized=__UpperCAmelCase) if isinstance(__UpperCAmelCase , __UpperCAmelCase) else mask_token ) super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , **__UpperCAmelCase , ) a_ = do_lower_case a_ = remove_space a_ = keep_accents a_ = vocab_file a_ = False if not self.vocab_file else True def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->List[int]: a_ = [self.sep_token_id] a_ = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->List[int]: a_ = [self.sep_token_id] a_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(__UpperCAmelCase): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''') return a_ = os.path.join( __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(__UpperCAmelCase): copyfile(self.vocab_file , __UpperCAmelCase) return (out_vocab_file,)
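# Usage sketch: the fast ALBERT tokenizer above is normally loaded from a published
# checkpoint such as albert-base-v2.
from transformers import AlbertTokenizerFast

tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
encoding = tokenizer("Hello world", "How are you?")
print(encoding.input_ids)       # [CLS] A [SEP] B [SEP], as built above
print(encoding.token_type_ids)  # 0s for the first segment, 1s for the second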
303
"""simple docstring""" from urllib.parse import quote import pytest from datasets.utils.hub import hf_hub_url @pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] ) @pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] ) @pytest.mark.parametrize("revision" , [None, "v2"] ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int: """simple docstring""" a_ = hf_hub_url(repo_id=UpperCAmelCase , path=UpperCAmelCase , revision=UpperCAmelCase ) assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(UpperCAmelCase )}'''
303
1
"""simple docstring""" import argparse import os import re UpperCamelCase_ = 'src/transformers' # Pattern that looks at the indentation in a line. UpperCamelCase_ = re.compile(R'^(\s*)\S') # Pattern that matches `"key":" and puts `key` in group 0. UpperCamelCase_ = re.compile(R'^\s*"([^"]+)":') # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. UpperCamelCase_ = re.compile(R'^\s*_import_structure\["([^"]+)"\]') # Pattern that matches `"key",` and puts `key` in group 0. UpperCamelCase_ = re.compile(R'^\s*"([^"]+)",\s*$') # Pattern that matches any `[stuff]` and puts `stuff` in group 0. UpperCamelCase_ = re.compile(R'\[([^\]]+)\]') def UpperCamelCase ( UpperCAmelCase ) ->List[Any]: """simple docstring""" a_ = _re_indent.search(UpperCAmelCase ) return "" if search is None else search.groups()[0] def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase="" , UpperCAmelCase=None , UpperCAmelCase=None ) ->Any: """simple docstring""" a_ = 0 a_ = code.split("\n" ) if start_prompt is not None: while not lines[index].startswith(UpperCAmelCase ): index += 1 a_ = ["\n".join(lines[:index] )] else: a_ = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). a_ = [lines[index]] index += 1 while index < len(UpperCAmelCase ) and (end_prompt is None or not lines[index].startswith(UpperCAmelCase )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(UpperCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ): current_block.append(lines[index] ) blocks.append("\n".join(UpperCAmelCase ) ) if index < len(UpperCAmelCase ) - 1: a_ = [lines[index + 1]] index += 1 else: a_ = [] else: blocks.append("\n".join(UpperCAmelCase ) ) a_ = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(UpperCAmelCase ) > 0: blocks.append("\n".join(UpperCAmelCase ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(UpperCAmelCase ): blocks.append("\n".join(lines[index:] ) ) return blocks def UpperCamelCase ( UpperCAmelCase ) ->Any: """simple docstring""" def _inner(UpperCAmelCase ): return key(UpperCAmelCase ).lower().replace("_" , "" ) return _inner def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=None ) ->Tuple: """simple docstring""" def noop(UpperCAmelCase ): return x if key is None: a_ = noop # Constants are all uppercase, they go first. a_ = [obj for obj in objects if key(UpperCAmelCase ).isupper()] # Classes are not all uppercase but start with a capital, they go second. a_ = [obj for obj in objects if key(UpperCAmelCase )[0].isupper() and not key(UpperCAmelCase ).isupper()] # Functions begin with a lowercase, they go last. a_ = [obj for obj in objects if not key(UpperCAmelCase )[0].isupper()] a_ = ignore_underscore(UpperCAmelCase ) return sorted(UpperCAmelCase , key=UpperCAmelCase ) + sorted(UpperCAmelCase , key=UpperCAmelCase ) + sorted(UpperCAmelCase , key=UpperCAmelCase ) def UpperCamelCase ( UpperCAmelCase ) ->Optional[Any]: """simple docstring""" def _replace(UpperCAmelCase ): a_ = match.groups()[0] if "," not in imports: return F'''[{imports}]''' a_ = [part.strip().replace("\"" , "" ) for part in imports.split("," )] # We will have a final empty element if the line finished with a comma. 
if len(keys[-1] ) == 0: a_ = keys[:-1] return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(UpperCAmelCase )] ) + "]" a_ = import_statement.split("\n" ) if len(UpperCAmelCase ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. a_ = 2 if lines[1].strip() == "[" else 1 a_ = [(i, _re_strip_line.search(UpperCAmelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] a_ = sort_objects(UpperCAmelCase , key=lambda UpperCAmelCase : x[1] ) a_ = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(UpperCAmelCase ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: a_ = _re_bracket_content.sub(_replace , lines[1] ) else: a_ = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: a_ = keys[:-1] a_ = get_indent(lines[1] ) + ", ".join([F'''"{k}"''' for k in sort_objects(UpperCAmelCase )] ) return "\n".join(UpperCAmelCase ) else: # Finally we have to deal with imports fitting on one line a_ = _re_bracket_content.sub(_replace , UpperCAmelCase ) return import_statement def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=True ) ->Union[str, Any]: """simple docstring""" with open(UpperCAmelCase , encoding="utf-8" ) as f: a_ = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 a_ = split_code_in_indented_blocks( UpperCAmelCase , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" ) # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(UpperCAmelCase ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. a_ = main_blocks[block_idx] a_ = block.split("\n" ) # Get to the start of the imports. a_ = 0 while line_idx < len(UpperCAmelCase ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: a_ = len(UpperCAmelCase ) else: line_idx += 1 if line_idx >= len(UpperCAmelCase ): continue # Ignore beginning and last line: they don't contain anything. a_ = "\n".join(block_lines[line_idx:-1] ) a_ = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. a_ = split_code_in_indented_blocks(UpperCAmelCase , indent_level=UpperCAmelCase ) # We have two categories of import key: list or _import_structure[key].append/extend a_ = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. a_ = [(pattern.search(UpperCAmelCase ).groups()[0] if pattern.search(UpperCAmelCase ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. a_ = [(i, key) for i, key in enumerate(UpperCAmelCase ) if key is not None] a_ = [x[0] for x in sorted(UpperCAmelCase , key=lambda UpperCAmelCase : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. 
a_ = 0 a_ = [] for i in range(len(UpperCAmelCase ) ): if keys[i] is None: reorderded_blocks.append(internal_blocks[i] ) else: a_ = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reorderded_blocks.append(UpperCAmelCase ) count += 1 # And we put our main block back together with its first and last line. a_ = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] ) if code != "\n".join(UpperCAmelCase ): if check_only: return True else: print(F'''Overwriting {file}.''' ) with open(UpperCAmelCase , "w" , encoding="utf-8" ) as f: f.write("\n".join(UpperCAmelCase ) ) def UpperCamelCase ( UpperCAmelCase=True ) ->List[str]: """simple docstring""" a_ = [] for root, _, files in os.walk(UpperCAmelCase ): if "__init__.py" in files: a_ = sort_imports(os.path.join(UpperCAmelCase , "__init__.py" ) , check_only=UpperCAmelCase ) if result: a_ = [os.path.join(UpperCAmelCase , "__init__.py" )] if len(UpperCAmelCase ) > 0: raise ValueError(F'''Would overwrite {len(UpperCAmelCase )} files, run `make style`.''' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') UpperCamelCase_ = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
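# Illustration (hypothetical input): the sorter above rewrites a multi-line
# _import_structure entry so constants come first, then classes, then functions:
#
#   _import_structure["models.bert"] = [              _import_structure["models.bert"] = [
#       "BertModel",                            ->        "BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
#       "load_tf_weights_in_bert",                        "BertModel",
#       "BERT_PRETRAINED_MODEL_ARCHIVE_LIST",             "load_tf_weights_in_bert",
#   ]                                                 ]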
303
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { 'MIT/ast-finetuned-audioset-10-10-0.4593': ( 'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json' ), } class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : Tuple = """audio-spectrogram-transformer""" def __init__( self , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=10 , __UpperCAmelCase=10_24 , __UpperCAmelCase=1_28 , **__UpperCAmelCase , ) ->str: super().__init__(**__UpperCAmelCase) a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = initializer_range a_ = layer_norm_eps a_ = patch_size a_ = qkv_bias a_ = frequency_stride a_ = time_stride a_ = max_length a_ = num_mel_bins
303
1
"""simple docstring""" # This is the module that test_patching.py uses to test patch_submodule() import os # noqa: this is just for tests import os as renamed_os # noqa: this is just for tests from os import path # noqa: this is just for tests from os import path as renamed_path # noqa: this is just for tests from os.path import join # noqa: this is just for tests from os.path import join as renamed_join # noqa: this is just for tests UpperCamelCase_ = open # noqa: we just need to have a builtin inside this module to test it properly
303
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { 'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json', 'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json', 'xlm-roberta-large-finetuned-conll02-dutch': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json' ), 'xlm-roberta-large-finetuned-conll02-spanish': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json' ), 'xlm-roberta-large-finetuned-conll03-english': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json' ), 'xlm-roberta-large-finetuned-conll03-german': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json' ), } class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : str = """xlm-roberta""" def __init__( self , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) ->Union[str, Any]: super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase) a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = hidden_act a_ = intermediate_size a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = type_vocab_size a_ = initializer_range a_ = layer_norm_eps a_ = position_embedding_type a_ = use_cache a_ = classifier_dropout class snake_case ( SCREAMING_SNAKE_CASE_ ): @property def UpperCAmelCase__ ( self) ->Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": a_ = {0: "batch", 1: "choice", 2: "sequence"} else: a_ = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ])
303
1
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { 'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json', 'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json', 'microsoft/deberta-v2-xlarge-mnli': ( 'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json' ), 'microsoft/deberta-v2-xxlarge-mnli': ( 'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json' ), } class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : List[Any] = """deberta-v2""" def __init__( self , __UpperCAmelCase=12_81_00 , __UpperCAmelCase=15_36 , __UpperCAmelCase=24 , __UpperCAmelCase=24 , __UpperCAmelCase=61_44 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-7 , __UpperCAmelCase=False , __UpperCAmelCase=-1 , __UpperCAmelCase=0 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=0 , __UpperCAmelCase="gelu" , **__UpperCAmelCase , ) ->Optional[Any]: super().__init__(**__UpperCAmelCase) a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = type_vocab_size a_ = initializer_range a_ = relative_attention a_ = max_relative_positions a_ = pad_token_id a_ = position_biased_input # Backwards compatibility if type(__UpperCAmelCase) == str: a_ = [x.strip() for x in pos_att_type.lower().split("|")] a_ = pos_att_type a_ = vocab_size a_ = layer_norm_eps a_ = kwargs.get("pooler_hidden_size" , __UpperCAmelCase) a_ = pooler_dropout a_ = pooler_hidden_act class snake_case ( SCREAMING_SNAKE_CASE_ ): @property def UpperCAmelCase__ ( self) ->Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": a_ = {0: "batch", 1: "choice", 2: "sequence"} else: a_ = {0: "batch", 1: "sequence"} if self._config.type_vocab_size > 0: return OrderedDict( [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]) else: return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)]) @property def UpperCAmelCase__ ( self) ->int: return 12 def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = 3 , __UpperCAmelCase = 40 , __UpperCAmelCase = 40 , __UpperCAmelCase = None , ) ->Mapping[str, Any]: a_ = super().generate_dummy_inputs(preprocessor=__UpperCAmelCase , framework=__UpperCAmelCase) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
303
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class snake_case : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=24 , __UpperCAmelCase=2 , __UpperCAmelCase=6 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=10_00 , ) ->List[str]: a_ = parent a_ = batch_size a_ = seq_length a_ = is_training a_ = use_input_mask a_ = use_token_type_ids a_ = use_labels a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = type_vocab_size a_ = type_sequence_label_size a_ = initializer_range a_ = num_labels a_ = scope a_ = range_bbox def UpperCAmelCase__ ( self) ->int: a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: a_ = bbox[i, j, 3] a_ = bbox[i, j, 1] a_ = t if bbox[i, j, 2] < bbox[i, j, 0]: a_ = bbox[i, j, 2] a_ = bbox[i, j, 0] a_ = t a_ = None if self.use_input_mask: a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) a_ = None if self.use_token_type_ids: a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) a_ = None a_ = None if self.use_labels: a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) a_ = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def UpperCAmelCase__ ( self) ->List[str]: return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Any: a_ = LiltModel(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase) a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , 
token_type_ids=__UpperCAmelCase) a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Union[str, Any]: a_ = self.num_labels a_ = LiltForTokenClassification(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model( __UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Dict: a_ = LiltForQuestionAnswering(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model( __UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def UpperCAmelCase__ ( self) ->str: a_ = self.prepare_config_and_inputs() ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) = config_and_inputs a_ = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : List[Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) a_ : List[str] = ( { """feature-extraction""": LiltModel, """question-answering""": LiltForQuestionAnswering, """text-classification""": LiltForSequenceClassification, """token-classification""": LiltForTokenClassification, """zero-shot""": LiltForSequenceClassification, } if is_torch_available() else {} ) a_ : Any = False a_ : Dict = False def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int: return True def UpperCAmelCase__ ( self) ->str: a_ = LiltModelTester(self) a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37) def UpperCAmelCase__ ( self) ->List[Any]: self.config_tester.run_common_tests() def UpperCAmelCase__ ( self) ->Tuple: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Dict: a_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a_ = type self.model_tester.create_and_check_model(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->List[str]: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->str: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase) @slow def UpperCAmelCase__ ( self) ->List[Any]: for model_name in 
LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ = LiltModel.from_pretrained(__UpperCAmelCase) self.assertIsNotNone(__UpperCAmelCase) @require_torch @slow class snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self) ->List[Any]: a_ = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(__UpperCAmelCase) a_ = torch.tensor([[1, 2]] , device=__UpperCAmelCase) a_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__UpperCAmelCase) # forward pass with torch.no_grad(): a_ = model(input_ids=__UpperCAmelCase , bbox=__UpperCAmelCase) a_ = torch.Size([1, 2, 7_68]) a_ = torch.tensor( [[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=__UpperCAmelCase , ) self.assertTrue(outputs.last_hidden_state.shape , __UpperCAmelCase) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __UpperCAmelCase , atol=1E-3))
303
1
"""simple docstring""" from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { 'huggingface/time-series-transformer-tourism-monthly': ( 'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : List[Any] = """time_series_transformer""" a_ : Optional[int] = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", """num_hidden_layers""": """encoder_layers""", } def __init__( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = "student_t" , __UpperCAmelCase = "nll" , __UpperCAmelCase = 1 , __UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7] , __UpperCAmelCase = "mean" , __UpperCAmelCase = 0 , __UpperCAmelCase = 0 , __UpperCAmelCase = 0 , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 32 , __UpperCAmelCase = 32 , __UpperCAmelCase = 2 , __UpperCAmelCase = 2 , __UpperCAmelCase = 2 , __UpperCAmelCase = 2 , __UpperCAmelCase = True , __UpperCAmelCase = "gelu" , __UpperCAmelCase = 64 , __UpperCAmelCase = 0.1 , __UpperCAmelCase = 0.1 , __UpperCAmelCase = 0.1 , __UpperCAmelCase = 0.1 , __UpperCAmelCase = 0.1 , __UpperCAmelCase = 1_00 , __UpperCAmelCase = 0.02 , __UpperCAmelCase=True , **__UpperCAmelCase , ) ->Optional[int]: # time series specific configuration a_ = prediction_length a_ = context_length or prediction_length a_ = distribution_output a_ = loss a_ = input_size a_ = num_time_features a_ = lags_sequence a_ = scaling a_ = num_dynamic_real_features a_ = num_static_real_features a_ = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(__UpperCAmelCase) != num_static_categorical_features: raise ValueError( "The cardinality should be a list of the same length as `num_static_categorical_features`") a_ = cardinality else: a_ = [0] if embedding_dimension and num_static_categorical_features > 0: if len(__UpperCAmelCase) != num_static_categorical_features: raise ValueError( "The embedding dimension should be a list of the same length as `num_static_categorical_features`") a_ = embedding_dimension else: a_ = [min(50 , (cat + 1) // 2) for cat in self.cardinality] a_ = num_parallel_samples # Transformer architecture configuration a_ = input_size * len(__UpperCAmelCase) + self._number_of_features a_ = d_model a_ = encoder_attention_heads a_ = decoder_attention_heads a_ = encoder_ffn_dim a_ = decoder_ffn_dim a_ = encoder_layers a_ = decoder_layers a_ = dropout a_ = attention_dropout a_ = activation_dropout a_ = encoder_layerdrop a_ = decoder_layerdrop a_ = activation_function a_ = init_std a_ = use_cache super().__init__(is_encoder_decoder=__UpperCAmelCase , **__UpperCAmelCase) @property def UpperCAmelCase__ ( self) ->int: return ( sum(self.embedding_dimension) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
303
"""simple docstring""" from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case ( SCREAMING_SNAKE_CASE_ ): def UpperCAmelCase__ ( self) ->Any: a_ = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(__UpperCAmelCase , "embed_dim")) self.parent.assertTrue(hasattr(__UpperCAmelCase , "num_heads")) class snake_case : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=64 , __UpperCAmelCase=3 , __UpperCAmelCase=[16, 48, 96] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=2 , ) ->Optional[int]: a_ = parent a_ = batch_size a_ = image_size a_ = patch_sizes a_ = patch_stride a_ = patch_padding a_ = is_training a_ = use_labels a_ = num_labels a_ = num_channels a_ = embed_dim a_ = num_heads a_ = stride_kv a_ = depth a_ = cls_token a_ = attention_drop_rate a_ = initializer_range a_ = layer_norm_eps def UpperCAmelCase__ ( self) ->Any: a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) a_ = None if self.use_labels: # create a random int32 tensor of given shape a_ = ids_tensor([self.batch_size] , self.num_labels) a_ = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self) ->Union[str, Any]: return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[Any]: a_ = TFCvtModel(config=__UpperCAmelCase) a_ = model(__UpperCAmelCase , training=__UpperCAmelCase) a_ = (self.image_size, self.image_size) a_ , a_ = image_size[0], image_size[1] for i in range(len(self.depth)): a_ = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1) a_ = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str: a_ = self.num_labels a_ = TFCvtForImageClassification(__UpperCAmelCase) a_ = model(__UpperCAmelCase , labels=__UpperCAmelCase , 
training=__UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def UpperCAmelCase__ ( self) ->Tuple: a_ = self.prepare_config_and_inputs() a_ , a_ , a_ = config_and_inputs a_ = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : Union[str, Any] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () a_ : List[Any] = ( {"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification} if is_tf_available() else {} ) a_ : Any = False a_ : Dict = False a_ : Optional[int] = False a_ : List[Any] = False a_ : List[Any] = False def UpperCAmelCase__ ( self) ->List[str]: a_ = TFCvtModelTester(self) a_ = TFCvtConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37) def UpperCAmelCase__ ( self) ->List[str]: self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason="Cvt does not output attentions") def UpperCAmelCase__ ( self) ->Dict: pass @unittest.skip(reason="Cvt does not use inputs_embeds") def UpperCAmelCase__ ( self) ->List[str]: pass @unittest.skip(reason="Cvt does not support input and output embeddings") def UpperCAmelCase__ ( self) ->Optional[Any]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) def UpperCAmelCase__ ( self) ->Dict: super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." 
, ) @slow def UpperCAmelCase__ ( self) ->List[str]: super().test_keras_fit() @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8") def UpperCAmelCase__ ( self) ->Dict: a_ = tf.keras.mixed_precision.Policy("mixed_float16") tf.keras.mixed_precision.set_global_policy(__UpperCAmelCase) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy("float32") def UpperCAmelCase__ ( self) ->Optional[int]: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = model_class(__UpperCAmelCase) a_ = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic a_ = [*signature.parameters.keys()] a_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCAmelCase) def UpperCAmelCase__ ( self) ->Optional[int]: def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase): a_ = model_class(__UpperCAmelCase) a_ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase)) a_ = outputs.hidden_states a_ = len(self.model_tester.depth) self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:]) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a_ = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) def UpperCAmelCase__ ( self) ->Dict: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->List[str]: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase) @slow def UpperCAmelCase__ ( self) ->str: for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ = TFCvtModel.from_pretrained(__UpperCAmelCase) self.assertIsNotNone(__UpperCAmelCase) def UpperCamelCase ( ) ->Dict: """simple docstring""" a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class snake_case ( unittest.TestCase ): @cached_property def UpperCAmelCase__ ( self) ->int: return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) @slow def UpperCAmelCase__ ( self) ->Any: a_ = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) a_ = self.default_image_processor a_ = prepare_img() a_ = image_processor(images=__UpperCAmelCase , return_tensors="tf") # forward pass a_ = model(**__UpperCAmelCase) # verify the logits a_ = tf.TensorShape((1, 10_00)) self.assertEqual(outputs.logits.shape , __UpperCAmelCase) a_ = tf.constant([0.9_285, 0.9_015, -0.3_150]) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __UpperCAmelCase , atol=1E-4))
303
1
"""simple docstring""" import sys from collections import defaultdict class snake_case : def __init__( self) ->Union[str, Any]: a_ = [] def UpperCAmelCase__ ( self , __UpperCAmelCase) ->int: return self.node_position[vertex] def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->Union[str, Any]: a_ = pos def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Dict: if start > size // 2 - 1: return else: if 2 * start + 2 >= size: a_ = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: a_ = 2 * start + 1 else: a_ = 2 * start + 2 if heap[smallest_child] < heap[start]: a_ , a_ = heap[smallest_child], positions[smallest_child] a_ , a_ = ( heap[start], positions[start], ) a_ , a_ = temp, tempa a_ = self.get_position(positions[smallest_child]) self.set_position( positions[smallest_child] , self.get_position(positions[start])) self.set_position(positions[start] , __UpperCAmelCase) self.top_to_bottom(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->List[Any]: a_ = position[index] while index != 0: a_ = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2) if val < heap[parent]: a_ = heap[parent] a_ = position[parent] self.set_position(position[parent] , __UpperCAmelCase) else: a_ = val a_ = temp self.set_position(__UpperCAmelCase , __UpperCAmelCase) break a_ = parent else: a_ = val a_ = temp self.set_position(__UpperCAmelCase , 0) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->str: a_ = len(__UpperCAmelCase) // 2 - 1 for i in range(__UpperCAmelCase , -1 , -1): self.top_to_bottom(__UpperCAmelCase , __UpperCAmelCase , len(__UpperCAmelCase) , __UpperCAmelCase) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->Dict: a_ = positions[0] a_ = sys.maxsize self.top_to_bottom(__UpperCAmelCase , 0 , len(__UpperCAmelCase) , __UpperCAmelCase) return temp def UpperCamelCase ( UpperCAmelCase ) ->Optional[int]: """simple docstring""" a_ = Heap() a_ = [0] * len(UpperCAmelCase ) a_ = [-1] * len(UpperCAmelCase ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph a_ = [] # Heap of Distance of vertices from their neighboring vertex a_ = [] for vertex in range(len(UpperCAmelCase ) ): distance_tv.append(sys.maxsize ) positions.append(UpperCAmelCase ) heap.node_position.append(UpperCAmelCase ) a_ = [] a_ = 1 a_ = sys.maxsize for neighbor, distance in adjacency_list[0]: a_ = 0 a_ = distance heap.heapify(UpperCAmelCase , UpperCAmelCase ) for _ in range(1 , len(UpperCAmelCase ) ): a_ = heap.delete_minimum(UpperCAmelCase , UpperCAmelCase ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) a_ = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(UpperCAmelCase )] ): a_ = distance heap.bottom_to_top( UpperCAmelCase , heap.get_position(UpperCAmelCase ) , UpperCAmelCase , UpperCAmelCase ) a_ = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > UpperCamelCase_ = int(input('Enter number of edges: ').strip()) UpperCamelCase_ = defaultdict(list) for _ in range(edges_number): UpperCamelCase_ = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) 
print(prisms_algorithm(adjacency_list))
303
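# Illustrative aside (not part of the dataset row above): the Prim's implementation
# above drives a hand-rolled heap from interactive stdin input. A minimal,
# self-contained sketch of the same adjacency-list format fed to a lazy-deletion
# Prim's built on the standard library's heapq follows; every name here is
# hypothetical, not taken from the row.
import heapq
from collections import defaultdict


def prim_mst(adjacency_list, start=0):
    # Pop the cheapest crossing edge; skip entries whose endpoint was already taken.
    visited = {start}
    heap = [(w, start, v) for v, w in adjacency_list[start]]
    heapq.heapify(heap)
    tree_edges = []
    while heap and len(visited) < len(adjacency_list):
        weight, u, v = heapq.heappop(heap)
        if v in visited:
            continue
        visited.add(v)
        tree_edges.append((u, v))
        for nxt, w in adjacency_list[v]:
            if nxt not in visited:
                heapq.heappush(heap, (w, v, nxt))
    return tree_edges


graph = defaultdict(list)
for u, v, w in [(0, 1, 1), (0, 2, 3), (1, 2, 1), (1, 3, 4)]:
    # same [neighbor, weight] entry format as the row above, both directions
    graph[u].append([v, w])
    graph[v].append([u, w])
print(prim_mst(graph))  # [(0, 1), (1, 2), (1, 3)]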
"""simple docstring""" import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : Dict = """Speech2TextFeatureExtractor""" a_ : str = """Speech2TextTokenizer""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase) ->List[str]: super().__init__(__UpperCAmelCase , __UpperCAmelCase) a_ = self.feature_extractor a_ = False def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->Optional[int]: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.") a_ = kwargs.pop("raw_speech") else: a_ = kwargs.pop("audio" , __UpperCAmelCase) a_ = kwargs.pop("sampling_rate" , __UpperCAmelCase) a_ = kwargs.pop("text" , __UpperCAmelCase) if len(__UpperCAmelCase) > 0: a_ = args[0] a_ = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process.") if audio is not None: a_ = self.feature_extractor(__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase) if text is not None: a_ = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase) if text is None: return inputs elif audio is None: return encodings else: a_ = encodings["input_ids"] return inputs def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->str: return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase) def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->int: return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase) @contextmanager def UpperCAmelCase__ ( self) ->Tuple: warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call.") a_ = True a_ = self.tokenizer yield a_ = self.feature_extractor a_ = False
303
1
"""simple docstring""" from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING UpperCamelCase_ = logging.get_logger(__name__) @add_end_docstrings(SCREAMING_SNAKE_CASE_ ) class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , **__UpperCAmelCase) ->int: super().__init__(**__UpperCAmelCase) requires_backends(self , "vision") requires_backends(self , "torch") if self.framework != "pt": raise ValueError(F'''The {self.__class__} is only available in PyTorch.''') self.check_model_type(__UpperCAmelCase) def UpperCAmelCase__ ( self , **__UpperCAmelCase) ->Union[str, Any]: a_ = {} a_ = {} a_ = {} # preprocess args if "points_per_batch" in kwargs: a_ = kwargs["points_per_batch"] if "points_per_crop" in kwargs: a_ = kwargs["points_per_crop"] if "crops_n_layers" in kwargs: a_ = kwargs["crops_n_layers"] if "crop_overlap_ratio" in kwargs: a_ = kwargs["crop_overlap_ratio"] if "crop_n_points_downscale_factor" in kwargs: a_ = kwargs["crop_n_points_downscale_factor"] # postprocess args if "pred_iou_thresh" in kwargs: a_ = kwargs["pred_iou_thresh"] if "stability_score_offset" in kwargs: a_ = kwargs["stability_score_offset"] if "mask_threshold" in kwargs: a_ = kwargs["mask_threshold"] if "stability_score_thresh" in kwargs: a_ = kwargs["stability_score_thresh"] if "crops_nms_thresh" in kwargs: a_ = kwargs["crops_nms_thresh"] if "output_rle_mask" in kwargs: a_ = kwargs["output_rle_mask"] if "output_bboxes_mask" in kwargs: a_ = kwargs["output_bboxes_mask"] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self , __UpperCAmelCase , *__UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase) ->str: return super().__call__(__UpperCAmelCase , *__UpperCAmelCase , num_workers=__UpperCAmelCase , batch_size=__UpperCAmelCase , **__UpperCAmelCase) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase=64 , __UpperCAmelCase = 0 , __UpperCAmelCase = 5_12 / 15_00 , __UpperCAmelCase = 32 , __UpperCAmelCase = 1 , ) ->Union[str, Any]: a_ = load_image(__UpperCAmelCase) a_ = self.image_processor.size["longest_edge"] a_ , a_ , a_ , a_ = self.image_processor.generate_crop_boxes( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) a_ = self.image_processor(images=__UpperCAmelCase , return_tensors="pt") with self.device_placement(): if self.framework == "pt": a_ = self.get_inference_context() with inference_context(): a_ = self._ensure_tensor_on_device(__UpperCAmelCase , device=self.device) a_ = self.model.get_image_embeddings(model_inputs.pop("pixel_values")) a_ = image_embeddings a_ = grid_points.shape[1] a_ = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. 
" "To return all points at once, set points_per_batch to None") for i in range(0 , __UpperCAmelCase , __UpperCAmelCase): a_ = grid_points[:, i : i + points_per_batch, :, :] a_ = input_labels[:, i : i + points_per_batch] a_ = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase=0.88 , __UpperCAmelCase=0.95 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , ) ->Optional[int]: a_ = model_inputs.pop("input_boxes") a_ = model_inputs.pop("is_last") a_ = model_inputs.pop("original_sizes").tolist() a_ = model_inputs.pop("reshaped_input_sizes").tolist() a_ = self.model(**__UpperCAmelCase) # post processing happens here in order to avoid CPU GPU copies of ALL the masks a_ = model_outputs["pred_masks"] a_ = self.image_processor.post_process_masks( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , binarize=__UpperCAmelCase) a_ = model_outputs["iou_scores"] a_ , a_ , a_ = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=0.7 , ) ->Optional[Any]: a_ = [] a_ = [] a_ = [] for model_output in model_outputs: all_scores.append(model_output.pop("iou_scores")) all_masks.extend(model_output.pop("masks")) all_boxes.append(model_output.pop("boxes")) a_ = torch.cat(__UpperCAmelCase) a_ = torch.cat(__UpperCAmelCase) a_ , a_ , a_ , a_ = self.image_processor.post_process_for_mask_generation( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) a_ = defaultdict(__UpperCAmelCase) for output in model_outputs: for k, v in output.items(): extra[k].append(__UpperCAmelCase) a_ = {} if output_rle_mask: a_ = rle_mask if output_bboxes_mask: a_ = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
303
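# Illustrative aside (not part of the dataset row above): the mask-generation
# pipeline slices its point grid into chunks of `points_per_batch` and tags the
# final chunk with `is_last`. A minimal sketch of that chunking contract on plain
# Python sequences follows; the function name is hypothetical, and the `>=` end
# test is a small robustness tweak over the pipeline's exact-equality check,
# which assumes the point count divides evenly.
def iter_point_batches(grid_points, points_per_batch):
    # Yield (chunk, is_last) pairs so a consumer knows when to post-process.
    if points_per_batch is None or points_per_batch <= 0:
        raise ValueError("points_per_batch must be >= 1; pass None upstream to disable batching.")
    n_points = len(grid_points)
    for i in range(0, n_points, points_per_batch):
        chunk = grid_points[i : i + points_per_batch]
        yield chunk, i + points_per_batch >= n_points


points = [(x, y) for x in range(4) for y in range(4)]  # a 4x4 grid of 16 points
for chunk, is_last in iter_point_batches(points, points_per_batch=6):
    print(len(chunk), is_last)  # 6 False / 6 False / 4 True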
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase_ = { 'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ 'LILT_PRETRAINED_MODEL_ARCHIVE_LIST', 'LiltForQuestionAnswering', 'LiltForSequenceClassification', 'LiltForTokenClassification', 'LiltModel', 'LiltPreTrainedModel', ] if TYPE_CHECKING: from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lilt import ( LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, LiltPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
303
1
"""simple docstring""" import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class snake_case : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=2 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=36 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=6 , __UpperCAmelCase=6 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , __UpperCAmelCase=10_00 , ) ->Any: a_ = parent a_ = batch_size a_ = num_channels a_ = image_size a_ = patch_size a_ = text_seq_length a_ = is_training a_ = use_input_mask a_ = use_token_type_ids a_ = use_labels a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = type_vocab_size a_ = type_sequence_label_size a_ = initializer_range a_ = coordinate_size a_ = shape_size a_ = num_labels a_ = num_choices a_ = scope a_ = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) a_ = text_seq_length a_ = (image_size // patch_size) ** 2 + 1 a_ = self.text_seq_length + self.image_seq_length def UpperCAmelCase__ ( self) ->Tuple: a_ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size) a_ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: a_ = bbox[i, j, 3] a_ = bbox[i, j, 1] a_ = t if bbox[i, j, 2] < bbox[i, j, 0]: a_ = bbox[i, j, 2] a_ = bbox[i, j, 0] a_ = t a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) a_ = None if self.use_input_mask: a_ = random_attention_mask([self.batch_size, self.text_seq_length]) a_ = None if self.use_token_type_ids: a_ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size) a_ = None a_ = None if self.use_labels: a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) a_ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels) a_ = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str: a_ = LayoutLMvaModel(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() # text + image a_ = model(__UpperCAmelCase , pixel_values=__UpperCAmelCase) a_ = model( __UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase) a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , token_type_ids=__UpperCAmelCase) a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) # text only a_ = model(__UpperCAmelCase) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size)) # image only a_ = model(pixel_values=__UpperCAmelCase) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str: a_ = self.num_labels a_ = LayoutLMvaForSequenceClassification(__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model( __UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Union[str, Any]: a_ = self.num_labels a_ = LayoutLMvaForTokenClassification(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model( __UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->List[str]: a_ = LayoutLMvaForQuestionAnswering(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model( __UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) 
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def UpperCAmelCase__ ( self) ->Union[str, Any]: a_ = self.prepare_config_and_inputs() ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) = config_and_inputs a_ = { "input_ids": input_ids, "bbox": bbox, "pixel_values": pixel_values, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : List[str] = False a_ : int = False a_ : Dict = False a_ : str = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) a_ : str = ( {"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel} if is_torch_available() else {} ) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int: # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has # the sequence dimension of the text embedding only. # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`) return True def UpperCAmelCase__ ( self) ->Any: a_ = LayoutLMvaModelTester(self) a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False) ->Optional[int]: a_ = copy.deepcopy(__UpperCAmelCase) if model_class in get_values(__UpperCAmelCase): a_ = { k: v.unsqueeze(1).expand(-1 , self.model_tester.num_choices , -1).contiguous() if isinstance(__UpperCAmelCase , torch.Tensor) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__UpperCAmelCase): a_ = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase) elif model_class in get_values(__UpperCAmelCase): a_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase) a_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase) elif model_class in [ *get_values(__UpperCAmelCase), ]: a_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase) elif model_class in [ *get_values(__UpperCAmelCase), ]: a_ = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__UpperCAmelCase , ) return inputs_dict def UpperCAmelCase__ ( self) ->Optional[int]: self.config_tester.run_common_tests() def UpperCAmelCase__ ( self) ->int: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Dict: a_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a_ = type self.model_tester.create_and_check_model(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Optional[int]: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Optional[Any]: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase) def UpperCAmelCase__ ( self) 
->List[Any]: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase) @slow def UpperCAmelCase__ ( self) ->Optional[Any]: for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ = LayoutLMvaModel.from_pretrained(__UpperCAmelCase) self.assertIsNotNone(__UpperCAmelCase) def UpperCamelCase ( ) ->Any: """simple docstring""" a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch class snake_case ( unittest.TestCase ): @cached_property def UpperCAmelCase__ ( self) ->str: return LayoutLMvaImageProcessor(apply_ocr=__UpperCAmelCase) if is_vision_available() else None @slow def UpperCAmelCase__ ( self) ->Dict: a_ = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(__UpperCAmelCase) a_ = self.default_image_processor a_ = prepare_img() a_ = image_processor(images=__UpperCAmelCase , return_tensors="pt").pixel_values.to(__UpperCAmelCase) a_ = torch.tensor([[1, 2]]) a_ = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0) # forward pass a_ = model( input_ids=input_ids.to(__UpperCAmelCase) , bbox=bbox.to(__UpperCAmelCase) , pixel_values=pixel_values.to(__UpperCAmelCase) , ) # verify the logits a_ = torch.Size((1, 1_99, 7_68)) self.assertEqual(outputs.last_hidden_state.shape , __UpperCAmelCase) a_ = torch.tensor( [[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]]).to(__UpperCAmelCase) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __UpperCAmelCase , atol=1E-4))
303
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase_ = { 'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'], 'feature_extraction_mctct': ['MCTCTFeatureExtractor'], 'processing_mctct': ['MCTCTProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ 'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST', 'MCTCTForCTC', 'MCTCTModel', 'MCTCTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
303
1
"""simple docstring""" from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : List[Any] = ["""vqvae"""] def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Dict: super().__init__() self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , mel=__UpperCAmelCase , vqvae=__UpperCAmelCase) def UpperCAmelCase__ ( self) ->int: return 50 if isinstance(self.scheduler , __UpperCAmelCase) else 10_00 @torch.no_grad() def __call__( self , __UpperCAmelCase = 1 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase=True , ) ->Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: a_ = steps or self.get_default_steps() self.scheduler.set_timesteps(__UpperCAmelCase) a_ = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size) == int: a_ = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: a_ = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=__UpperCAmelCase , device=self.device , ) a_ = noise a_ = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(__UpperCAmelCase , __UpperCAmelCase) a_ = self.mel.audio_slice_to_image(__UpperCAmelCase) a_ = np.frombuffer(input_image.tobytes() , dtype="uint8").reshape( (input_image.height, input_image.width)) a_ = (input_image / 2_55) * 2 - 1 a_ = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device) if self.vqvae is not None: a_ = self.vqvae.encode(torch.unsqueeze(__UpperCAmelCase , 0)).latent_dist.sample( generator=__UpperCAmelCase)[0] a_ = self.vqvae.config.scaling_factor * input_images if start_step > 0: a_ = self.scheduler.add_noise(__UpperCAmelCase , __UpperCAmelCase , self.scheduler.timesteps[start_step - 1]) a_ = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) a_ = int(mask_start_secs * pixels_per_second) a_ = int(mask_end_secs * pixels_per_second) a_ = self.scheduler.add_noise(__UpperCAmelCase , __UpperCAmelCase , torch.tensor(self.scheduler.timesteps[start_step:])) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])): if isinstance(self.unet , __UpperCAmelCase): a_ = self.unet(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)["sample"] else: a_ = self.unet(__UpperCAmelCase , __UpperCAmelCase)["sample"] if isinstance(self.scheduler , __UpperCAmelCase): a_ = self.scheduler.step( model_output=__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , )["prev_sample"] else: a_ = self.scheduler.step( model_output=__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , generator=__UpperCAmelCase , )["prev_sample"] if mask is not None: if 
mask_start > 0: a_ = mask[:, step, :, :mask_start] if mask_end > 0: a_ = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance a_ = 1 / self.vqvae.config.scaling_factor * images a_ = self.vqvae.decode(__UpperCAmelCase)["sample"] a_ = (images / 2 + 0.5).clamp(0 , 1) a_ = images.cpu().permute(0 , 2 , 3 , 1).numpy() a_ = (images * 2_55).round().astype("uint8") a_ = list( (Image.fromarray(_[:, :, 0]) for _ in images) if images.shape[3] == 1 else (Image.fromarray(__UpperCAmelCase , mode="RGB").convert("L") for _ in images)) a_ = [self.mel.image_to_audio(__UpperCAmelCase) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(__UpperCAmelCase)[:, np.newaxis, :]) , **ImagePipelineOutput(__UpperCAmelCase)) @torch.no_grad() def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = 50) ->np.ndarray: assert isinstance(self.scheduler , __UpperCAmelCase) self.scheduler.set_timesteps(__UpperCAmelCase) a_ = np.array( [np.frombuffer(image.tobytes() , dtype="uint8").reshape((1, image.height, image.width)) for image in images]) a_ = (sample / 2_55) * 2 - 1 a_ = torch.Tensor(__UpperCAmelCase).to(self.device) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))): a_ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps a_ = self.scheduler.alphas_cumprod[t] a_ = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) a_ = 1 - alpha_prod_t a_ = self.unet(__UpperCAmelCase , __UpperCAmelCase)["sample"] a_ = (1 - alpha_prod_t_prev) ** 0.5 * model_output a_ = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) a_ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def UpperCAmelCase__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->torch.Tensor: a_ = acos(torch.dot(torch.flatten(__UpperCAmelCase) , torch.flatten(__UpperCAmelCase)) / torch.norm(__UpperCAmelCase) / torch.norm(__UpperCAmelCase)) return sin((1 - alpha) * theta) * xa / sin(__UpperCAmelCase) + sin(alpha * theta) * xa / sin(__UpperCAmelCase)
303
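# Illustrative aside (not part of the dataset row above): the pipeline's static
# helper is spherical linear interpolation (slerp) between two noise tensors.
# A NumPy sketch of the same formula follows; the function name is hypothetical,
# and the expression is undefined for parallel inputs, so independent random
# noise vectors are assumed.
import numpy as np


def slerp(x0: np.ndarray, x1: np.ndarray, alpha: float) -> np.ndarray:
    # Interpolate along the great circle between x0 and x1 rather than the
    # straight line, which better preserves the norm profile of Gaussian noise.
    theta = np.arccos(np.dot(x0.ravel(), x1.ravel()) / (np.linalg.norm(x0) * np.linalg.norm(x1)))
    return (np.sin((1 - alpha) * theta) * x0 + np.sin(alpha * theta) * x1) / np.sin(theta)


rng = np.random.default_rng(0)
a, b = rng.standard_normal(8), rng.standard_normal(8)
print(np.linalg.norm(slerp(a, b, 0.5)))  # midpoint keeps a norm comparable to the endpoints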
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) UpperCamelCase_ = { 'configuration_swiftformer': [ 'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwiftFormerConfig', 'SwiftFormerOnnxConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ 'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'SwiftFormerForImageClassification', 'SwiftFormerModel', 'SwiftFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
303
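# Illustrative aside (not part of the dataset rows above): the lilt, mctct, and
# swiftformer __init__ rows all defer their heavy imports through transformers'
# _LazyModule. A toy, self-contained sketch of that deferred-import idea follows;
# the class and names here are hypothetical, not the transformers implementation.
import importlib
import types


class LazyModule(types.ModuleType):
    # Submodules are imported only on first attribute access, then cached.
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._name_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(module_name), attr)
        setattr(self, attr, value)  # cache so the next lookup skips __getattr__
        return value


lazy_json = LazyModule("lazy_json", {"json": ["dumps", "loads"]})
print(lazy_json.dumps({"ok": True}))  # 'json' is imported only at this point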
1
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from accelerate import PartialState from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device ) def UpperCamelCase ( UpperCAmelCase ) ->str: """simple docstring""" a_ = create_tensor(UpperCAmelCase ) a_ = gather(UpperCAmelCase ) assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) ) def UpperCamelCase ( UpperCAmelCase ) ->List[Any]: """simple docstring""" a_ = [state.process_index] a_ = gather_object(UpperCAmelCase ) assert len(UpperCAmelCase ) == state.num_processes, F'''{gathered_obj}, {len(UpperCAmelCase )} != {state.num_processes}''' assert gathered_obj == list(range(state.num_processes ) ), F'''{gathered_obj} != {list(range(state.num_processes ) )}''' def UpperCamelCase ( UpperCAmelCase ) ->Optional[Any]: """simple docstring""" a_ = create_tensor(UpperCAmelCase ) a_ = broadcast(UpperCAmelCase ) assert broadcasted_tensor.shape == torch.Size([state.num_processes] ) assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) ) def UpperCamelCase ( UpperCAmelCase ) ->Dict: """simple docstring""" if state.is_main_process: a_ = torch.arange(state.num_processes + 1 ).to(state.device ) else: a_ = torch.arange(state.num_processes ).to(state.device ) a_ = pad_across_processes(UpperCAmelCase ) assert padded_tensor.shape == torch.Size([state.num_processes + 1] ) if not state.is_main_process: assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0] def UpperCamelCase ( UpperCAmelCase ) ->Optional[Any]: """simple docstring""" if state.num_processes != 2: return a_ = create_tensor(UpperCAmelCase ) a_ = reduce(UpperCAmelCase , "sum" ) a_ = torch.tensor([4.0, 6] ).to(state.device ) assert torch.allclose(UpperCAmelCase , UpperCAmelCase ), F'''{reduced_tensor} != {truth_tensor}''' def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" if state.num_processes != 2: return a_ = create_tensor(UpperCAmelCase ) a_ = reduce(UpperCAmelCase , "mean" ) a_ = torch.tensor([2.0, 3] ).to(state.device ) assert torch.allclose(UpperCAmelCase , UpperCAmelCase ), F'''{reduced_tensor} != {truth_tensor}''' def UpperCamelCase ( UpperCAmelCase ) ->Dict: """simple docstring""" main() def UpperCamelCase ( ) ->List[str]: """simple docstring""" a_ = PartialState() state.print(F'''State: {state}''' ) state.print("testing gather" ) test_gather(UpperCAmelCase ) state.print("testing gather_object" ) test_gather_object(UpperCAmelCase ) state.print("testing broadcast" ) test_broadcast(UpperCAmelCase ) state.print("testing pad_across_processes" ) test_pad_across_processes(UpperCAmelCase ) state.print("testing reduce_sum" ) test_reduce_sum(UpperCAmelCase ) state.print("testing reduce_mean" 
) test_reduce_mean(UpperCAmelCase ) if __name__ == "__main__": main()
303
"""simple docstring""" # Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]: """simple docstring""" a_ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, nicht wahr?", } # BLUE scores as follows: # "pair": [fairseq, transformers] a_ = { "wmt16-en-de-dist-12-1": [28.3, 27.52], "wmt16-en-de-dist-6-1": [27.4, 27.11], "wmt16-en-de-12-1": [26.9, 25.75], } a_ = F'''{src_lang}-{tgt_lang}''' a_ = F''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt16 - allenai license: apache-2.0 datasets: - wmt16 metrics: - bleu --- # FSMT ## Model description This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}. For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369). All 3 models are available: * [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1) * [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1) * [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "allenai/{model_name}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias ## Training data Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369). ## Eval results Here are the BLEU scores: model | fairseq | transformers -------|---------|---------- {model_name} | {scores[model_name][0]} | {scores[model_name][1]} The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs. The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=5 mkdir -p $DATA_DIR sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` ## Data Sources - [training, etc.](http://www.statmt.org/wmt16/) - [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372) ### BibTeX entry and citation info ``` @misc{{kasai2020deep, title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}}, author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. 
Smith}},
    year={{2020}},
    eprint={{2006.10369}},
    archivePrefix={{arXiv}},
    primaryClass={{cs.CL}}
}}
```
'''

    model_card_dir.mkdir(parents=UpperCAmelCase , exist_ok=UpperCAmelCase )
    a_ = os.path.join(UpperCAmelCase , "README.md" )
    print(F'''Generating {path}''' )
    with open(UpperCAmelCase , "w" , encoding="utf-8" ) as f:
        f.write(UpperCAmelCase )


# make sure we are under the root of the project
UpperCamelCase_ = Path(__file__).resolve().parent.parent.parent
UpperCamelCase_ = repo_dir / 'model_cards'

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    UpperCamelCase_ = model_cards_dir / 'allenai' / model_name
    write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
303
1
"""simple docstring""" def UpperCamelCase ( UpperCAmelCase ) ->bool: """simple docstring""" if not all(x.isalpha() for x in string ): raise ValueError("String must only contain alphabetic characters." ) a_ = sorted(string.lower() ) return len(UpperCAmelCase ) == len(set(UpperCAmelCase ) ) if __name__ == "__main__": UpperCamelCase_ = input('Enter a string ').strip() UpperCamelCase_ = is_isogram(input_str) print(F"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
303
"""simple docstring""" def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int: """simple docstring""" if n == 0: return 1 elif n % 2 == 1: return (binary_exponentiation(UpperCAmelCase , n - 1 , UpperCAmelCase ) * a) % mod else: a_ = binary_exponentiation(UpperCAmelCase , n / 2 , UpperCAmelCase ) return (b * b) % mod # a prime number UpperCamelCase_ = 701 UpperCamelCase_ = 1000000000 UpperCamelCase_ = 10 # using binary exponentiation function, O(log(p)): print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p) print((a / b) % p == (a * b ** (p - 2)) % p)
303
1
"""simple docstring""" import fire from utils import calculate_rouge, save_json def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ) ->Dict: """simple docstring""" a_ = [x.strip() for x in open(UpperCAmelCase ).readlines()] a_ = [x.strip() for x in open(UpperCAmelCase ).readlines()][: len(UpperCAmelCase )] a_ = calculate_rouge(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) if save_path is not None: save_json(UpperCAmelCase , UpperCAmelCase , indent=UpperCAmelCase ) return metrics # these print nicely if __name__ == "__main__": fire.Fire(calculate_rouge_path)
303
"""simple docstring""" import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor UpperCamelCase_ = logging.get_logger(__name__) class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->None: warnings.warn( "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use YolosImageProcessor instead." , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase)
303
1
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline UpperCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase) ->List[str]: super().__init__() self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase) @torch.no_grad() def __call__( self , __UpperCAmelCase = 1 , __UpperCAmelCase = 1_00 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , ) ->Union[AudioPipelineOutput, Tuple]: if audio_length_in_s is None: a_ = self.unet.config.sample_size / self.unet.config.sample_rate a_ = audio_length_in_s * self.unet.config.sample_rate a_ = 2 ** len(self.unet.up_blocks) if sample_size < 3 * down_scale_factor: raise ValueError( F'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to''' F''' {3 * down_scale_factor / self.unet.config.sample_rate}.''') a_ = int(__UpperCAmelCase) if sample_size % down_scale_factor != 0: a_ = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( F'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled''' F''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising''' " process.") a_ = int(__UpperCAmelCase) a_ = next(iter(self.unet.parameters())).dtype a_ = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(__UpperCAmelCase , __UpperCAmelCase) and len(__UpperCAmelCase) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(__UpperCAmelCase)}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''') a_ = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase , device=self.device , dtype=__UpperCAmelCase) # set step values self.scheduler.set_timesteps(__UpperCAmelCase , device=audio.device) a_ = self.scheduler.timesteps.to(__UpperCAmelCase) for t in self.progress_bar(self.scheduler.timesteps): # 1. predict noise model_output a_ = self.unet(__UpperCAmelCase , __UpperCAmelCase).sample # 2. compute previous image: x_t -> t_t-1 a_ = self.scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase).prev_sample a_ = audio.clamp(-1 , 1).float().cpu().numpy() a_ = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=__UpperCAmelCase)
303
"""simple docstring""" import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self) ->Dict: a_ = inspect.getfile(accelerate.test_utils) a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"]) a_ = os.path.sep.join( mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]) a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"]) @require_multi_gpu def UpperCAmelCase__ ( self) ->Any: print(F'''Found {torch.cuda.device_count()} devices.''') a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path] with patch_environment(omp_num_threads=1): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy()) @require_multi_gpu def UpperCAmelCase__ ( self) ->str: print(F'''Found {torch.cuda.device_count()} devices.''') a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path] print(F'''Command: {cmd}''') with patch_environment(omp_num_threads=1): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy()) @require_multi_gpu def UpperCAmelCase__ ( self) ->Optional[int]: a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)] with patch_environment(omp_num_threads=1): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy()) @require_multi_gpu def UpperCAmelCase__ ( self) ->List[Any]: print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''') a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1"): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy()) if __name__ == "__main__": UpperCamelCase_ = Accelerator() UpperCamelCase_ = (accelerator.state.process_index + 2, 10) UpperCamelCase_ = torch.randint(0, 10, shape).to(accelerator.device) UpperCamelCase_ = '' UpperCamelCase_ = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." UpperCamelCase_ = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." UpperCamelCase_ = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
303
1
"""simple docstring""" from .testing import ( are_the_same_tensors, execute_subprocess_async, require_bnb, require_cpu, require_cuda, require_huggingface_suite, require_mps, require_multi_gpu, require_multi_xpu, require_safetensors, require_single_gpu, require_single_xpu, require_torch_min_version, require_tpu, require_xpu, skip, slow, ) from .training import RegressionDataset, RegressionModel, RegressionModelaXPU from .scripts import test_script, test_sync, test_ops # isort: skip
303
"""simple docstring""" from heapq import heappop, heappush import numpy as np def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) ->tuple[float | int, list[tuple[int, int]]]: """simple docstring""" a_ , a_ = grid.shape a_ = [-1, 1, 0, 0] a_ = [0, 0, -1, 1] if allow_diagonal: dx += [-1, -1, 1, 1] dy += [-1, 1, -1, 1] a_ , a_ = [(0, source)], set() a_ = np.full((rows, cols) , np.inf ) a_ = 0 a_ = np.empty((rows, cols) , dtype=UpperCAmelCase ) a_ = None while queue: ((a_) , (a_)) = heappop(UpperCAmelCase ) if (x, y) in visited: continue visited.add((x, y) ) if (x, y) == destination: a_ = [] while (x, y) != source: path.append((x, y) ) a_ , a_ = predecessors[x, y] path.append(UpperCAmelCase ) # add the source manually path.reverse() return matrix[destination], path for i in range(len(UpperCAmelCase ) ): a_ , a_ = x + dx[i], y + dy[i] if 0 <= nx < rows and 0 <= ny < cols: a_ = grid[nx][ny] if next_node == 1 and matrix[nx, ny] > dist + 1: heappush(UpperCAmelCase , (dist + 1, (nx, ny)) ) a_ = dist + 1 a_ = (x, y) return np.inf, [] if __name__ == "__main__": import doctest doctest.testmod()
303
1
"""simple docstring""" from argparse import ArgumentParser, Namespace from typing import Any, List, Optional from ..pipelines import Pipeline, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand try: from fastapi import Body, FastAPI, HTTPException from fastapi.routing import APIRoute from pydantic import BaseModel from starlette.responses import JSONResponse from uvicorn import run UpperCamelCase_ = True except (ImportError, AttributeError): UpperCamelCase_ = object def UpperCamelCase ( *UpperCAmelCase , **UpperCAmelCase ) ->Dict: """simple docstring""" pass UpperCamelCase_ = False UpperCamelCase_ = logging.get_logger('transformers-cli/serving') def UpperCamelCase ( UpperCAmelCase ) ->str: """simple docstring""" a_ = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) return ServeCommand(UpperCAmelCase , args.host , args.port , args.workers ) class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : dict class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : List[str] a_ : Optional[List[int]] class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : str class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : Any class snake_case ( SCREAMING_SNAKE_CASE_ ): @staticmethod def UpperCAmelCase__ ( __UpperCAmelCase) ->int: a_ = parser.add_parser( "serve" , help="CLI tool to run inference requests through REST and GraphQL endpoints.") serve_parser.add_argument( "--task" , type=__UpperCAmelCase , choices=get_supported_tasks() , help="The task to run the pipeline on" , ) serve_parser.add_argument("--host" , type=__UpperCAmelCase , default="localhost" , help="Interface the server will listen on.") serve_parser.add_argument("--port" , type=__UpperCAmelCase , default=88_88 , help="Port the serving will listen to.") serve_parser.add_argument("--workers" , type=__UpperCAmelCase , default=1 , help="Number of http workers") serve_parser.add_argument("--model" , type=__UpperCAmelCase , help="Model's name or path to stored model.") serve_parser.add_argument("--config" , type=__UpperCAmelCase , help="Model's config name or path to stored model.") serve_parser.add_argument("--tokenizer" , type=__UpperCAmelCase , help="Tokenizer name to use.") serve_parser.add_argument( "--device" , type=__UpperCAmelCase , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , ) serve_parser.set_defaults(func=__UpperCAmelCase) def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Any: a_ = pipeline a_ = host a_ = port a_ = workers if not _serve_dependencies_installed: raise RuntimeError( "Using serve command requires FastAPI and uvicorn. " "Please install transformers with [serving]: pip install \"transformers[serving]\"." 
"Or install FastAPI and uvicorn separately.") else: logger.info(F'''Serving model over {host}:{port}''') a_ = FastAPI( routes=[ APIRoute( "/" , self.model_info , response_model=__UpperCAmelCase , response_class=__UpperCAmelCase , methods=["GET"] , ), APIRoute( "/tokenize" , self.tokenize , response_model=__UpperCAmelCase , response_class=__UpperCAmelCase , methods=["POST"] , ), APIRoute( "/detokenize" , self.detokenize , response_model=__UpperCAmelCase , response_class=__UpperCAmelCase , methods=["POST"] , ), APIRoute( "/forward" , self.forward , response_model=__UpperCAmelCase , response_class=__UpperCAmelCase , methods=["POST"] , ), ] , timeout=6_00 , ) def UpperCAmelCase__ ( self) ->Optional[Any]: run(self._app , host=self.host , port=self.port , workers=self.workers) def UpperCAmelCase__ ( self) ->Tuple: return ServeModelInfoResult(infos=vars(self._pipeline.model.config)) def UpperCAmelCase__ ( self , __UpperCAmelCase = Body(__UpperCAmelCase , embed=__UpperCAmelCase) , __UpperCAmelCase = Body(__UpperCAmelCase , embed=__UpperCAmelCase)) ->str: try: a_ = self._pipeline.tokenizer.tokenize(__UpperCAmelCase) if return_ids: a_ = self._pipeline.tokenizer.convert_tokens_to_ids(__UpperCAmelCase) return ServeTokenizeResult(tokens=__UpperCAmelCase , tokens_ids=__UpperCAmelCase) else: return ServeTokenizeResult(tokens=__UpperCAmelCase) except Exception as e: raise HTTPException(status_code=5_00 , detail={"model": "", "error": str(__UpperCAmelCase)}) def UpperCAmelCase__ ( self , __UpperCAmelCase = Body(__UpperCAmelCase , embed=__UpperCAmelCase) , __UpperCAmelCase = Body(__UpperCAmelCase , embed=__UpperCAmelCase) , __UpperCAmelCase = Body(__UpperCAmelCase , embed=__UpperCAmelCase) , ) ->List[str]: try: a_ = self._pipeline.tokenizer.decode(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) return ServeDeTokenizeResult(model="" , text=__UpperCAmelCase) except Exception as e: raise HTTPException(status_code=5_00 , detail={"model": "", "error": str(__UpperCAmelCase)}) async def UpperCAmelCase__ ( self , __UpperCAmelCase=Body(__UpperCAmelCase , embed=__UpperCAmelCase)) ->int: # Check we don't have empty string if len(__UpperCAmelCase) == 0: return ServeForwardResult(output=[] , attention=[]) try: # Forward through the model a_ = self._pipeline(__UpperCAmelCase) return ServeForwardResult(output=__UpperCAmelCase) except Exception as e: raise HTTPException(5_00 , {"error": str(__UpperCAmelCase)})
303
"""simple docstring""" import numpy as np import torch from imwatermark import WatermarkEncoder # Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66 UpperCamelCase_ = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10 # bin(x)[2:] gives bits of x as str, use int to convert them to 0/1 UpperCamelCase_ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]] class snake_case : def __init__( self) ->Optional[int]: a_ = WATERMARK_BITS a_ = WatermarkEncoder() self.encoder.set_watermark("bits" , self.watermark) def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[int]: # can't encode images that are smaller than 256 if images.shape[-1] < 2_56: return images a_ = (2_55 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1).float().numpy() a_ = [self.encoder.encode(__UpperCAmelCase , "dwtDct") for image in images] a_ = torch.from_numpy(np.array(__UpperCAmelCase)).permute(0 , 3 , 1 , 2) a_ = torch.clamp(2 * (images / 2_55 - 0.5) , min=-1.0 , max=1.0) return images
303
1
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self) ->Any: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self) ->Optional[Any]: a_ = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") a_ = sd_pipe.to(__UpperCAmelCase) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase) sd_pipe.set_scheduler("sample_euler") a_ = "A painting of a squirrel eating a burger" a_ = torch.manual_seed(0) a_ = sd_pipe([prompt] , generator=__UpperCAmelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type="np") a_ = output.images a_ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) a_ = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 def UpperCAmelCase__ ( self) ->Dict: a_ = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base") a_ = sd_pipe.to(__UpperCAmelCase) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase) sd_pipe.set_scheduler("sample_euler") a_ = "A painting of a squirrel eating a burger" a_ = torch.manual_seed(0) a_ = sd_pipe([prompt] , generator=__UpperCAmelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type="np") a_ = output.images a_ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) a_ = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-1 def UpperCAmelCase__ ( self) ->Optional[Any]: a_ = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base") a_ = sd_pipe.to(__UpperCAmelCase) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase) sd_pipe.set_scheduler("sample_dpmpp_2m") a_ = "A painting of a squirrel eating a burger" a_ = torch.manual_seed(0) a_ = sd_pipe( [prompt] , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=15 , output_type="np" , use_karras_sigmas=__UpperCAmelCase , ) a_ = output.images a_ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) a_ = np.array( [0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
303
"""simple docstring""" import math UpperCamelCase_ = 10 UpperCamelCase_ = 7 UpperCamelCase_ = BALLS_PER_COLOUR * NUM_COLOURS def UpperCamelCase ( UpperCAmelCase = 20 ) ->str: """simple docstring""" a_ = math.comb(UpperCAmelCase , UpperCAmelCase ) a_ = math.comb(NUM_BALLS - BALLS_PER_COLOUR , UpperCAmelCase ) a_ = NUM_COLOURS * (1 - missing_colour / total) return F'''{result:.9f}''' if __name__ == "__main__": print(solution(20))
303
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule UpperCamelCase_ = {'tokenization_bertweet': ['BertweetTokenizer']} if TYPE_CHECKING: from .tokenization_bertweet import BertweetTokenizer else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
303
"""simple docstring""" import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params UpperCamelCase_ = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ['memory_attention', 'encoder_attn'], ['attention', 'attn'], ['/', '.'], ['.LayerNorm.gamma', '_layer_norm.weight'], ['.LayerNorm.beta', '_layer_norm.bias'], ['r.layer_', 'r.layers.'], ['output_proj', 'out_proj'], ['ffn.dense_1.', 'fc2.'], ['ffn.dense.', 'fc1.'], ['ffn_layer_norm', 'final_layer_norm'], ['kernel', 'weight'], ['encoder_layer_norm.', 'encoder.layer_norm.'], ['decoder_layer_norm.', 'decoder.layer_norm.'], ['embeddings.weights', 'shared.weight'], ] def UpperCamelCase ( UpperCAmelCase ) ->Optional[Any]: """simple docstring""" for pegasus_name, hf_name in PATTERNS: a_ = k.replace(UpperCAmelCase , UpperCAmelCase ) return k def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->PegasusForConditionalGeneration: """simple docstring""" a_ = DEFAULTS.copy() cfg_kwargs.update(UpperCAmelCase ) a_ = PegasusConfig(**UpperCAmelCase ) a_ = PegasusForConditionalGeneration(UpperCAmelCase ) a_ = torch_model.model.state_dict() a_ = {} for k, v in tf_weights.items(): a_ = rename_state_dict_key(UpperCAmelCase ) if new_k not in sd: raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' ) if "dense" in k or "proj" in new_k: a_ = v.T a_ = torch.tensor(UpperCAmelCase , dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}''' # make sure embedding.padding_idx is respected a_ = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1] ) a_ = mapping["shared.weight"] a_ = mapping["shared.weight"] a_ = {k: torch.zeros_like(UpperCAmelCase ) for k, v in sd.items() if k.endswith("bias" ) and k not in mapping} mapping.update(**UpperCAmelCase ) a_ , a_ = torch_model.model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase ) a_ = [ k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"] ] assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}''' assert extra == [], F'''no matches found for the following tf keys {extra}''' return torch_model def UpperCamelCase ( UpperCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) ->Dict: """simple docstring""" a_ = tf.train.list_variables(UpperCAmelCase ) a_ = {} a_ = ["Adafactor", "global_step"] for name, shape in tqdm(UpperCAmelCase , desc="converting tf checkpoint to dict" ): a_ = any(pat in name for pat in ignore_name ) if skip_key: continue a_ = tf.train.load_variable(UpperCAmelCase , UpperCAmelCase ) a_ = array return tf_weights def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" a_ = Path(UpperCAmelCase ).parent.name a_ = task_specific_params[F'''summarization_{dataset}''']["max_position_embeddings"] a_ = PegasusTokenizer.from_pretrained("sshleifer/pegasus" , model_max_length=UpperCAmelCase ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(UpperCAmelCase ) # convert model a_ = get_tf_weights_as_numpy(UpperCAmelCase ) a_ = task_specific_params[F'''summarization_{dataset}'''] if dataset == "large": a_ = task_specific_params a_ = convert_pegasus(UpperCAmelCase 
, UpperCAmelCase ) torch_model.save_pretrained(UpperCAmelCase ) a_ = torch_model.state_dict() sd.pop("model.decoder.embed_positions.weight" ) sd.pop("model.encoder.embed_positions.weight" ) torch.save(UpperCAmelCase , Path(UpperCAmelCase ) / "pytorch_model.bin" ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.') UpperCamelCase_ = parser.parse_args() if args.save_dir is None: UpperCamelCase_ = Path(args.tf_ckpt_path).parent.name UpperCamelCase_ = os.path.join('pegasus', dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
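# --- Illustrative trace (the example key is my own, not from a real checkpoint):
# how the substring table above rewrites one TF-style key into a transformers key.
patterns = [
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    ["ffn.dense_1.", "fc2."],
    ["kernel", "weight"],
]  # a subset of PATTERNS above; the replacements are applied in order

key = "model/decoder/memory_attention/ffn/dense_1/kernel"
for pegasus_name, hf_name in patterns:
    key = key.replace(pegasus_name, hf_name)
print(key)  # model.decoder.encoder_attn.fc2.weight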
303
1
"""simple docstring""" import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} UpperCamelCase_ = { 'vocab_file': { 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json', }, 'merges_file': { 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt', }, 'tokenizer_file': { 'Salesforce/codegen-350M-mono': ( 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json' ), }, } UpperCamelCase_ = { 'Salesforce/codegen-350M-mono': 2048, } class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : str = VOCAB_FILES_NAMES a_ : Tuple = PRETRAINED_VOCAB_FILES_MAP a_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ : int = ["""input_ids""", """attention_mask"""] a_ : Union[str, Any] = CodeGenTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ) ->Optional[int]: super().__init__( __UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , ) if kwargs.pop("add_bos_token" , __UpperCAmelCase): a_ = kwargs.pop("name_or_path" , "") raise ValueError( "Currenty GPT2's fast tokenizer does NOT support adding a BOS token." "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n" F'''`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n''' F'''`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n''' "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005." " so that the fast tokenizer works correctly.") a_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get("add_prefix_space" , __UpperCAmelCase) != add_prefix_space: a_ = getattr(__UpperCAmelCase , pre_tok_state.pop("type")) a_ = add_prefix_space a_ = pre_tok_class(**__UpperCAmelCase) a_ = add_prefix_space def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->BatchEncoding: a_ = kwargs.get("is_split_into_words" , __UpperCAmelCase) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase) def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->BatchEncoding: a_ = kwargs.get("is_split_into_words" , __UpperCAmelCase) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." 
) return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->Tuple[str]: a_ = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase) return tuple(__UpperCAmelCase) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ) ->str: a_ = super().decode( token_ids=__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase , **__UpperCAmelCase , ) if truncate_before_pattern is not None and len(__UpperCAmelCase) > 0: a_ = self.truncate(__UpperCAmelCase , __UpperCAmelCase) return decoded_text def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->int: def find_re(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase): a_ = pattern.search(__UpperCAmelCase , __UpperCAmelCase) return m.start() if m else -1 a_ = [re.compile(__UpperCAmelCase , re.MULTILINE) for pattern in truncate_before_pattern] a_ = list(re.finditer("^print" , __UpperCAmelCase , re.MULTILINE)) if len(__UpperCAmelCase) > 1: a_ = completion[: prints[1].start()] a_ = list(re.finditer("^def" , __UpperCAmelCase , re.MULTILINE)) if len(__UpperCAmelCase) > 1: a_ = completion[: defs[1].start()] a_ = 0 a_ = [ pos for pos in [find_re(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) for terminal in terminals] if pos != -1 ] if len(__UpperCAmelCase) > 0: return completion[: min(__UpperCAmelCase)] else: return completion
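# --- Illustrative sketch (the regex and sample text are mine): the core of the
# truncation logic above, standalone — a generated completion is cut before the
# second top-level `def` (and analogously before a second `print` or a custom pattern).
import re

completion = "def f():\n    return 1\n\ndef g():\n    return 2\n"
defs = list(re.finditer("^def", completion, re.MULTILINE))
if len(defs) > 1:
    completion = completion[: defs[1].start()]
print(repr(completion))  # 'def f():\n    return 1\n\n'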
303
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class snake_case : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=50 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=None , ) ->Dict: a_ = parent a_ = batch_size a_ = seq_length a_ = is_training a_ = use_input_mask a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = initializer_range a_ = use_labels a_ = scope def UpperCAmelCase__ ( self) ->Any: a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a_ = None if self.use_input_mask: a_ = random_attention_mask([self.batch_size, self.seq_length]) if self.use_labels: a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a_ = self.get_config() return config, input_ids, input_mask, token_labels def UpperCAmelCase__ ( self) ->Optional[Any]: return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self) ->List[str]: ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) = self.prepare_config_and_inputs() a_ = True a_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->str: a_ = BertGenerationEncoder(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase) a_ = model(__UpperCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->Union[str, Any]: a_ = True a_ = BertGenerationEncoder(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , 
encoder_hidden_states=__UpperCAmelCase , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->List[str]: a_ = True a_ = True a_ = BertGenerationDecoder(config=__UpperCAmelCase).to(__UpperCAmelCase).eval() # first forward pass a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , ) a_ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids a_ = ids_tensor((self.batch_size, 3) , config.vocab_size) a_ = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and a_ = torch.cat([input_ids, next_tokens] , dim=-1) a_ = torch.cat([input_mask, next_mask] , dim=-1) a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0] a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0] # select random slice a_ = ids_tensor((1,) , output_from_past.shape[-1]).item() a_ = output_from_no_past[:, -3:, random_slice_idx].detach() a_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , ) ->Tuple: a_ = BertGenerationDecoder(__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCAmelCase__ ( self) ->str: a_ , a_ , a_ , a_ = self.prepare_config_and_inputs() a_ = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : List[str] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () a_ : Optional[int] = (BertGenerationDecoder,) if is_torch_available() else () a_ : List[Any] = ( {"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder} if is_torch_available() else {} ) def UpperCAmelCase__ ( self) ->List[Any]: a_ = BertGenerationEncoderTester(self) a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37) def UpperCAmelCase__ ( self) ->Optional[Any]: self.config_tester.run_common_tests() def UpperCAmelCase__ ( self) ->Tuple: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Tuple: a_ , a_ , a_ , a_ = self.model_tester.prepare_config_and_inputs() a_ = "bert" self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) def UpperCAmelCase__ ( self) ->int: a_ = 
self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->List[str]: a_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Optional[int]: # This regression test was failing with PyTorch < 1.3 ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() a_ = None self.model_tester.create_and_check_model_as_decoder( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) def UpperCAmelCase__ ( self) ->List[Any]: a_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase) @slow def UpperCAmelCase__ ( self) ->str: a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") self.assertIsNotNone(__UpperCAmelCase) @require_torch class snake_case ( unittest.TestCase ): @slow def UpperCAmelCase__ ( self) ->int: a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]]) with torch.no_grad(): a_ = model(__UpperCAmelCase)[0] a_ = torch.Size([1, 8, 10_24]) self.assertEqual(output.shape , __UpperCAmelCase) a_ = torch.tensor( [[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4)) @require_torch class snake_case ( unittest.TestCase ): @slow def UpperCAmelCase__ ( self) ->List[str]: a_ = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]]) with torch.no_grad(): a_ = model(__UpperCAmelCase)[0] a_ = torch.Size([1, 8, 5_03_58]) self.assertEqual(output.shape , __UpperCAmelCase) a_ = torch.tensor( [[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
303
1
"""simple docstring""" def UpperCamelCase ( UpperCAmelCase ) ->List[Any]: """simple docstring""" if not head: return True # split the list to two parts a_ , a_ = head.next, head while fast and fast.next: a_ = fast.next.next a_ = slow.next a_ = slow.next a_ = None # Don't forget here! But forget still works! # reverse the second part a_ = None while second: a_ = second.next a_ = node a_ = second a_ = nxt # compare two parts # second part has the same or one less node while node: if node.val != head.val: return False a_ = node.next a_ = head.next return True def UpperCamelCase ( UpperCAmelCase ) ->str: """simple docstring""" if not head or not head.next: return True # 1. Get the midpoint (slow) a_ = a_ = a_ = head while fast and fast.next: a_ , a_ = fast.next.next, slow.next # 2. Push the second half into the stack a_ = [slow.val] while slow.next: a_ = slow.next stack.append(slow.val ) # 3. Comparison while stack: if stack.pop() != cur.val: return False a_ = cur.next return True def UpperCamelCase ( UpperCAmelCase ) ->str: """simple docstring""" if not head or not head.next: return True a_ = {} a_ = 0 while head: if head.val in d: d[head.val].append(UpperCAmelCase ) else: a_ = [pos] a_ = head.next pos += 1 a_ = pos - 1 a_ = 0 for v in d.values(): if len(UpperCAmelCase ) % 2 != 0: middle += 1 else: a_ = 0 for i in range(0 , len(UpperCAmelCase ) ): if v[i] + v[len(UpperCAmelCase ) - 1 - step] != checksum: return False step += 1 if middle > 1: return False return True
303
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger(__name__) def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" if "resnet-50" in model_name: a_ = ResNetConfig.from_pretrained("microsoft/resnet-50" ) elif "resnet-101" in model_name: a_ = ResNetConfig.from_pretrained("microsoft/resnet-101" ) else: raise ValueError("Model name should include either resnet50 or resnet101" ) a_ = DetrConfig(use_timm_backbone=UpperCAmelCase , backbone_config=UpperCAmelCase ) # set label attributes a_ = "panoptic" in model_name if is_panoptic: a_ = 250 else: a_ = 91 a_ = "huggingface/label-files" a_ = "coco-detection-id2label.json" a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) ) a_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()} a_ = idalabel a_ = {v: k for k, v in idalabel.items()} return config, is_panoptic def UpperCamelCase ( UpperCAmelCase ) ->List[str]: """simple docstring""" a_ = [] # stem # fmt: off rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") ) rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") ) rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") ) rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") ) rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") ) # stages for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): # shortcut if layer_idx == 0: rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''', ) ) # 3 convs for i in range(3 ): rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''', 
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''', ) ) # fmt: on for i in range(config.encoder_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''', ) ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') ) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''', ) ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append( 
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ] ) return rename_keys def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" a_ = state_dict.pop(UpperCAmelCase ) a_ = val def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False ) ->Optional[Any]: """simple docstring""" a_ = "" if is_panoptic: a_ = "detr." # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict a_ = in_proj_weight[:256, :] a_ = in_proj_bias[:256] a_ = in_proj_weight[256:512, :] a_ = in_proj_bias[256:512] a_ = in_proj_weight[-256:, :] a_ = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' ) a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict a_ = in_proj_weight[:256, :] a_ = in_proj_bias[:256] a_ = in_proj_weight[256:512, :] a_ = in_proj_bias[256:512] a_ = in_proj_weight[-256:, :] a_ = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention a_ = state_dict.pop( F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' ) a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) of cross-attention to the state dict a_ = in_proj_weight_cross_attn[:256, :] a_ = in_proj_bias_cross_attn[:256] a_ = in_proj_weight_cross_attn[256:512, :] a_ = in_proj_bias_cross_attn[256:512] a_ = in_proj_weight_cross_attn[-256:, :] a_ = in_proj_bias_cross_attn[-256:] def UpperCamelCase ( ) ->Dict: """simple docstring""" a_ = 
"http://images.cocodataset.org/val2017/000000039769.jpg" a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) return im @torch.no_grad() def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=False ) ->List[str]: """simple docstring""" a_ , a_ = get_detr_config(UpperCAmelCase ) # load original model from torch hub a_ = { "detr-resnet-50": "detr_resnet50", "detr-resnet-101": "detr_resnet101", } logger.info(F'''Converting model {model_name}...''' ) a_ = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=UpperCAmelCase ).eval() a_ = detr.state_dict() # rename keys for src, dest in create_rename_keys(UpperCAmelCase ): if is_panoptic: a_ = "detr." + src rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # query, key and value matrices need special treatment read_in_q_k_v(UpperCAmelCase , is_panoptic=UpperCAmelCase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them a_ = "detr.model." if is_panoptic else "model." for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): a_ = state_dict.pop(UpperCAmelCase ) a_ = val elif "class_labels_classifier" in key or "bbox_predictor" in key: a_ = state_dict.pop(UpperCAmelCase ) a_ = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: a_ = state_dict.pop(UpperCAmelCase ) a_ = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): a_ = state_dict.pop(UpperCAmelCase ) a_ = val # finally, create HuggingFace model and load state dict a_ = DetrForSegmentation(UpperCAmelCase ) if is_panoptic else DetrForObjectDetection(UpperCAmelCase ) model.load_state_dict(UpperCAmelCase ) model.eval() # verify our conversion on an image a_ = "coco_panoptic" if is_panoptic else "coco_detection" a_ = DetrImageProcessor(format=UpperCAmelCase ) a_ = processor(images=prepare_img() , return_tensors="pt" ) a_ = encoding["pixel_values"] a_ = detr(UpperCAmelCase ) a_ = model(UpperCAmelCase ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase ) model.save_pretrained(UpperCAmelCase ) processor.save_pretrained(UpperCAmelCase ) if push_to_hub: # Upload model and image processor to the hub logger.info("Uploading PyTorch model and image processor to the hub..." ) model.push_to_hub(F'''nielsr/{model_name}''' ) processor.push_to_hub(F'''nielsr/{model_name}''' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument( '--model_name', default='detr-resnet-50', type=str, choices=['detr-resnet-50', 'detr-resnet-101'], help='Name of the DETR model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' 
) parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.') UpperCamelCase_ = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
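# --- Illustrative sketch (dummy tensors; DETR's hidden size of 256 is the only fact
# taken from above): how the stacked in_proj parameters are split into q / k / v.
import torch

in_proj_weight = torch.randn(3 * 256, 256)  # q, k, v projections stacked row-wise
in_proj_bias = torch.randn(3 * 256)
q_w, k_w, v_w = in_proj_weight[:256, :], in_proj_weight[256:512, :], in_proj_weight[-256:, :]
q_b, k_b, v_b = in_proj_bias[:256], in_proj_bias[256:512], in_proj_bias[-256:]
print(q_w.shape, k_w.shape, v_w.shape)  # three torch.Size([256, 256]) matrices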
303
1
"""simple docstring""" import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->str: """simple docstring""" a_ = args.log_outputs a_ = "_".join(args.dataset.split("/" ) + [args.config, args.split] ) # load metric a_ = load_metric("wer" ) a_ = load_metric("cer" ) # compute metrics a_ = wer.compute(references=result["target"] , predictions=result["prediction"] ) a_ = cer.compute(references=result["target"] , predictions=result["prediction"] ) # print & log results a_ = F'''WER: {wer_result}\nCER: {cer_result}''' print(UpperCAmelCase ) with open(F'''{dataset_id}_eval_results.txt''' , "w" ) as f: f.write(UpperCAmelCase ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: a_ = F'''log_{dataset_id}_predictions.txt''' a_ = F'''log_{dataset_id}_targets.txt''' with open(UpperCAmelCase , "w" ) as p, open(UpperCAmelCase , "w" ) as t: # mapping function to write output def write_to_file(UpperCAmelCase , UpperCAmelCase ): p.write(F'''{i}''' + "\n" ) p.write(batch["prediction"] + "\n" ) t.write(F'''{i}''' + "\n" ) t.write(batch["target"] + "\n" ) result.map(UpperCAmelCase , with_indices=UpperCAmelCase ) def UpperCamelCase ( UpperCAmelCase ) ->str: """simple docstring""" a_ = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training a_ = re.sub(UpperCAmelCase , "" , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! a_ = ["\n\n", "\n", " ", " "] for t in token_sequences_to_ignore: a_ = " ".join(text.split(UpperCAmelCase ) ) return text def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" a_ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=UpperCAmelCase ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor a_ = AutoFeatureExtractor.from_pretrained(args.model_id ) a_ = feature_extractor.sampling_rate # resample audio a_ = dataset.cast_column("audio" , Audio(sampling_rate=UpperCAmelCase ) ) # load eval pipeline if args.device is None: a_ = 0 if torch.cuda.is_available() else -1 a_ = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(UpperCAmelCase ): a_ = asr( batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) a_ = prediction["text"] a_ = normalize_text(batch["sentence"] ) return batch # run inference on all examples a_ = dataset.map(UpperCAmelCase , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(UpperCAmelCase , UpperCAmelCase ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument( '--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers' ) parser.add_argument( '--dataset', type=str, required=True, help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets', ) parser.add_argument( '--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice' ) parser.add_argument('--split', type=str, required=True, help='Split of the dataset. 
*E.g.* `\'test\'`') parser.add_argument( '--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.' ) parser.add_argument( '--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.' ) parser.add_argument( '--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.' ) parser.add_argument( '--device', type=int, default=None, help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.', ) UpperCamelCase_ = parser.parse_args() main(args)
303
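A minimal sketch of the metric step the evaluation script above performs, on toy strings. `load_metric` is the call used in the row (newer `datasets` releases move this to the `evaluate` package, and the WER backend requires `jiwer` to be installed); the example strings are illustrative.

from datasets import load_metric

wer = load_metric("wer")
cer = load_metric("cer")
predictions = ["hello world", "good morning"]
references = ["hello word", "good morning"]
print(wer.compute(predictions=predictions, references=references))  # 0.25: one of four reference words is wrong
print(cer.compute(predictions=predictions, references=references))  # character error rate on the same pairs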
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def UpperCamelCase ( UpperCAmelCase ) ->Tuple: """simple docstring""" a_ = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2] a_ = True if "large" in model_name or "huge" in model_name else False a_ = True if "large" in model_name or "huge" in model_name else False a_ = True if "large" in model_name or "huge" in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: a_ = [3, 3, 3, 3] a_ = [5, 5, 5, 5] elif "fl4" in model_name: a_ = [4, 4, 4, 4] a_ = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: a_ = [3, 3, 3, 3] if "lrf" in model_name: a_ = [3, 3, 3, 3] else: a_ = [2, 2, 2, 2] if "tiny" in model_name: a_ = 96 elif "small" in model_name: a_ = 96 elif "base" in model_name: a_ = 128 elif "large" in model_name: a_ = 192 elif "xlarge" in model_name: a_ = 256 elif "huge" in model_name: a_ = 352 # set label information a_ = "huggingface/label-files" if "large" in model_name or "huge" in model_name: a_ = "imagenet-22k-id2label.json" else: a_ = "imagenet-1k-id2label.json" a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) ) a_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()} a_ = {v: k for k, v in idalabel.items()} a_ = FocalNetConfig( embed_dim=UpperCAmelCase , depths=UpperCAmelCase , focal_levels=UpperCAmelCase , focal_windows=UpperCAmelCase , use_conv_embed=UpperCAmelCase , idalabel=UpperCAmelCase , labelaid=UpperCAmelCase , use_post_layernorm=UpperCAmelCase , use_layerscale=UpperCAmelCase , ) return config def UpperCamelCase ( UpperCAmelCase ) ->Any: """simple docstring""" if "patch_embed.proj" in name: a_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: a_ = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: a_ = "encoder." + name if "encoder.layers" in name: a_ = name.replace("encoder.layers" , "encoder.stages" ) if "downsample.proj" in name: a_ = name.replace("downsample.proj" , "downsample.projection" ) if "blocks" in name: a_ = name.replace("blocks" , "layers" ) if "modulation.f.weight" in name or "modulation.f.bias" in name: a_ = name.replace("modulation.f" , "modulation.projection_in" ) if "modulation.h.weight" in name or "modulation.h.bias" in name: a_ = name.replace("modulation.h" , "modulation.projection_context" ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: a_ = name.replace("modulation.proj" , "modulation.projection_out" ) if name == "norm.weight": a_ = "layernorm.weight" if name == "norm.bias": a_ = "layernorm.bias" if "head" in name: a_ = name.replace("head" , "classifier" ) else: a_ = "focalnet." 
+ name return name def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) ->Dict: """simple docstring""" a_ = { "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth", "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth", "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth", "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth", "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth", "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth", "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth", "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth", "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth", "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth", } # fmt: on a_ = model_name_to_url[model_name] print("Checkpoint URL: " , UpperCAmelCase ) a_ = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location="cpu" )["model"] # rename keys for key in state_dict.copy().keys(): a_ = state_dict.pop(UpperCAmelCase ) a_ = val a_ = get_focalnet_config(UpperCAmelCase ) a_ = FocalNetForImageClassification(UpperCAmelCase ) model.eval() # load state dict model.load_state_dict(UpperCAmelCase ) # verify conversion a_ = "http://images.cocodataset.org/val2017/000000039769.jpg" a_ = BitImageProcessor( do_resize=UpperCAmelCase , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase , crop_size=224 , do_normalize=UpperCAmelCase , image_mean=UpperCAmelCase , image_std=UpperCAmelCase , ) a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) a_ = processor(images=UpperCAmelCase , return_tensors="pt" ) a_ = transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) a_ = image_transforms(UpperCAmelCase ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , UpperCAmelCase , atol=1E-4 ) a_ = model(**UpperCAmelCase ) a_ = outputs.logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) print("First values of logits:" , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": a_ = torch.tensor([0.2166, -0.4368, 0.2191] ) elif model_name == "focalnet-tiny-lrf": a_ = torch.tensor([1.1669, 0.0125, -0.1695] ) elif model_name == "focalnet-small": a_ = torch.tensor([0.4917, -0.0430, 0.1341] ) elif model_name == "focalnet-small-lrf": a_ = torch.tensor([-0.2588, -0.5342, -0.2331] ) elif model_name == "focalnet-base": a_ = torch.tensor([-0.1655, -0.4090, -0.1730] ) elif model_name == "focalnet-base-lrf": a_ = torch.tensor([0.5306, -0.0483, -0.3928] ) assert torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1E-4 ) print("Looks ok!" 
) if pytorch_dump_folder_path is not None: print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCAmelCase ) processor.save_pretrained(UpperCAmelCase ) if push_to_hub: print(F'''Pushing model and processor of {model_name} to the hub...''' ) model.push_to_hub(F'''{model_name}''' ) processor.push_to_hub(F'''{model_name}''' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='focalnet-tiny', type=str, help='Name of the FocalNet model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub.', ) UpperCamelCase_ = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
303
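The conversion above follows the standard checkpoint-porting recipe: fetch the original state dict, rewrite its key names, load it into the Hugging Face class, then verify logits. A self-contained sketch of just the renaming step; the tensor shape and the single rule shown here are illustrative, mirroring one of the substitutions in the row.

import torch

state_dict = {"patch_embed.proj.weight": torch.zeros(96, 3, 4, 4)}

def rename_key(name):
    # one of the replace rules applied by the conversion above
    return name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")

state_dict = {rename_key(k): v for k, v in state_dict.items()}
print(list(state_dict))  # ['embeddings.patch_embeddings.projection.weight']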
1
"""simple docstring""" from heapq import heappop, heappush import numpy as np def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) ->tuple[float | int, list[tuple[int, int]]]: """simple docstring""" a_ , a_ = grid.shape a_ = [-1, 1, 0, 0] a_ = [0, 0, -1, 1] if allow_diagonal: dx += [-1, -1, 1, 1] dy += [-1, 1, -1, 1] a_ , a_ = [(0, source)], set() a_ = np.full((rows, cols) , np.inf ) a_ = 0 a_ = np.empty((rows, cols) , dtype=UpperCAmelCase ) a_ = None while queue: ((a_) , (a_)) = heappop(UpperCAmelCase ) if (x, y) in visited: continue visited.add((x, y) ) if (x, y) == destination: a_ = [] while (x, y) != source: path.append((x, y) ) a_ , a_ = predecessors[x, y] path.append(UpperCAmelCase ) # add the source manually path.reverse() return matrix[destination], path for i in range(len(UpperCAmelCase ) ): a_ , a_ = x + dx[i], y + dy[i] if 0 <= nx < rows and 0 <= ny < cols: a_ = grid[nx][ny] if next_node == 1 and matrix[nx, ny] > dist + 1: heappush(UpperCAmelCase , (dist + 1, (nx, ny)) ) a_ = dist + 1 a_ = (x, y) return np.inf, [] if __name__ == "__main__": import doctest doctest.testmod()
303
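Because the row above ships with obfuscated identifiers, here is a readable restatement of the same grid search (the name `grid_dijkstra` is an assumption, not the exported API), restricted to 4-connectivity, with a worked call.

import heapq
import numpy as np

def grid_dijkstra(grid, source, destination):
    # cells equal to 1 are passable, matching the routine above
    rows, cols = grid.shape
    dist = np.full((rows, cols), np.inf)
    dist[source] = 0
    queue = [(0, source)]
    while queue:
        d, (x, y) = heapq.heappop(queue)
        if (x, y) == destination:
            return d
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nx, ny = x + dx, y + dy
            if 0 <= nx < rows and 0 <= ny < cols and grid[nx, ny] == 1 and d + 1 < dist[nx, ny]:
                dist[nx, ny] = d + 1
                heapq.heappush(queue, (d + 1, (nx, ny)))
    return np.inf

grid = np.array([[1, 1, 0], [0, 1, 0], [0, 1, 1]])
print(grid_dijkstra(grid, (0, 0), (2, 2)))  # 4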
"""simple docstring""" import os import numpy import onnx def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->List[str]: """simple docstring""" a_ = a.name a_ = b.name a_ = "" a_ = "" a_ = a == b a_ = name_a a_ = name_b return res def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]: """simple docstring""" for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCAmelCase , UpperCAmelCase ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase , UpperCAmelCase ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Dict: """simple docstring""" for n in graph_proto.node: _node_replace_input_with(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int: """simple docstring""" a_ = list(model.graph.initializer ) a_ = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i a_ = inits[i].name a_ = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase ) def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" a_ = os.path.dirname(UpperCAmelCase ) a_ = os.path.basename(UpperCAmelCase ) a_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) ) a_ = list(model.graph.initializer ) a_ = set() a_ = {} a_ = [] a_ = 0 for i in range(len(UpperCAmelCase ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCAmelCase ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCAmelCase ) dup_set.add(UpperCAmelCase ) a_ = inits[j].data_type a_ = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print("unexpected data type: " , UpperCAmelCase ) total_reduced_size += mem_size a_ = inits[i].name a_ = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCAmelCase ) else: a_ = [name_j] ind_to_replace.append((j, i) ) print("total reduced size: " , total_reduced_size / 1_024 / 1_024 / 1_024 , "GB" ) a_ = sorted(UpperCAmelCase ) _remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) a_ = "optimized_" + model_file_name a_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) onnx.save(UpperCAmelCase , UpperCAmelCase ) return new_model
303
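The deduplication above works by pointing every node input at a single surviving initializer. A tiny sketch of that input-renaming move on a one-node graph; the graph and tensor names are illustrative.

import onnx
from onnx import TensorProto, helper

node = helper.make_node("Identity", inputs=["a"], outputs=["b"])
graph = helper.make_graph(
    [node],
    "g",
    [helper.make_tensor_value_info("a", TensorProto.FLOAT, [1])],
    [helper.make_tensor_value_info("b", TensorProto.FLOAT, [1])],
)
for n in graph.node:
    for i, input_name in enumerate(n.input):
        if input_name == "a":  # redirect to the kept duplicate, as _node_replace_input_with does above
            n.input.insert(i, "a_kept")
            n.input.pop(i + 1)
print(graph.node[0].input)  # ['a_kept']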
1
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: UpperCamelCase_ = None UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} UpperCamelCase_ = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json' ), }, } UpperCamelCase_ = { 'facebook/nllb-large-en-ro': 1024, 'facebook/nllb-200-distilled-600M': 1024, } # fmt: off UpperCamelCase_ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : List[str] = VOCAB_FILES_NAMES a_ : List[str] = 
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP a_ : List[str] = ["""input_ids""", """attention_mask"""] a_ : Union[str, Any] = NllbTokenizer a_ : List[int] = [] a_ : List[int] = [] def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=False , **__UpperCAmelCase , ) ->Optional[Any]: # Mask token behave like a normal word, i.e. include the space before it a_ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase) if isinstance(__UpperCAmelCase , __UpperCAmelCase) else mask_token a_ = legacy_behaviour super().__init__( vocab_file=__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , legacy_behaviour=__UpperCAmelCase , **__UpperCAmelCase , ) a_ = vocab_file a_ = False if not self.vocab_file else True a_ = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens]) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens}) a_ = { lang_code: self.convert_tokens_to_ids(__UpperCAmelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES } a_ = src_lang if src_lang is not None else "eng_Latn" a_ = self.convert_tokens_to_ids(self._src_lang) a_ = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def UpperCAmelCase__ ( self) ->str: return self._src_lang @src_lang.setter def UpperCAmelCase__ ( self , __UpperCAmelCase) ->None: a_ = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->List[int]: a_ = [self.sep_token_id] a_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase) ->List[Any]: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") a_ = src_lang a_ = self(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase) a_ = self.convert_tokens_to_ids(__UpperCAmelCase) a_ = tgt_lang_id return inputs def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = "eng_Latn" , __UpperCAmelCase = None , __UpperCAmelCase = "fra_Latn" , **__UpperCAmelCase , ) ->BatchEncoding: a_ = src_lang a_ = tgt_lang return super().prepare_seqaseq_batch(__UpperCAmelCase , __UpperCAmelCase , 
**__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Any: return self.set_src_lang_special_tokens(self.src_lang) def UpperCAmelCase__ ( self) ->Optional[int]: return self.set_tgt_lang_special_tokens(self.tgt_lang) def UpperCAmelCase__ ( self , __UpperCAmelCase) ->None: a_ = self.convert_tokens_to_ids(__UpperCAmelCase) if self.legacy_behaviour: a_ = [] a_ = [self.eos_token_id, self.cur_lang_code] else: a_ = [self.cur_lang_code] a_ = [self.eos_token_id] a_ = self.convert_ids_to_tokens(self.prefix_tokens) a_ = self.convert_ids_to_tokens(self.suffix_tokens) a_ = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def UpperCAmelCase__ ( self , __UpperCAmelCase) ->None: a_ = self.convert_tokens_to_ids(__UpperCAmelCase) if self.legacy_behaviour: a_ = [] a_ = [self.eos_token_id, self.cur_lang_code] else: a_ = [self.cur_lang_code] a_ = [self.eos_token_id] a_ = self.convert_ids_to_tokens(self.prefix_tokens) a_ = self.convert_ids_to_tokens(self.suffix_tokens) a_ = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(__UpperCAmelCase): logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''') return a_ = os.path.join( __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(__UpperCAmelCase): copyfile(self.vocab_file , __UpperCAmelCase) return (out_vocab_file,)
303
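A hedged usage sketch for the fast NLLB tokenizer defined above; the checkpoint name and language codes come from the row itself.

from transformers import NllbTokenizerFast

tok = NllbTokenizerFast.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
batch = tok("UN Chief says there is no military solution in Syria", return_tensors="pt")
# in the default (non-legacy) mode, inputs are wrapped as: [src_lang_code] ... </s>
print(tok.convert_ids_to_tokens(batch["input_ids"][0].tolist())[:2])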
"""simple docstring""" import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , __UpperCAmelCase = "▁" , __UpperCAmelCase = True , __UpperCAmelCase = "<unk>" , __UpperCAmelCase = "</s>" , __UpperCAmelCase = "<pad>" , ) ->str: a_ = { "pad": {"id": 0, "token": pad_token}, "eos": {"id": 1, "token": eos_token}, "unk": {"id": 2, "token": unk_token}, } a_ = [None] * len(self.special_tokens) for token_dict in self.special_tokens.values(): a_ = token_dict["token"] a_ = Tokenizer(Unigram()) a_ = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}") , " "), normalizers.Lowercase(), ]) a_ = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase), pre_tokenizers.Digits(individual_digits=__UpperCAmelCase), pre_tokenizers.Punctuation(), ]) a_ = decoders.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase) a_ = TemplateProcessing( single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , ) a_ = { "model": "SentencePieceUnigram", "replacement": replacement, "add_prefix_space": add_prefix_space, } super().__init__(__UpperCAmelCase , __UpperCAmelCase) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = 80_00 , __UpperCAmelCase = True , ) ->Optional[Any]: a_ = trainers.UnigramTrainer( vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , ) if isinstance(__UpperCAmelCase , __UpperCAmelCase): a_ = [files] self._tokenizer.train(__UpperCAmelCase , trainer=__UpperCAmelCase) self.add_unk_id() def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = 80_00 , __UpperCAmelCase = True , ) ->int: a_ = trainers.UnigramTrainer( vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , ) self._tokenizer.train_from_iterator(__UpperCAmelCase , trainer=__UpperCAmelCase) self.add_unk_id() def UpperCAmelCase__ ( self) ->Union[str, Any]: a_ = json.loads(self._tokenizer.to_str()) a_ = self.special_tokens["unk"]["id"] a_ = Tokenizer.from_str(json.dumps(__UpperCAmelCase))
303
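The wrapper above is a thin recipe over the `tokenizers` library. A minimal sketch of the same Unigram training flow with the raw API; the corpus and vocabulary size are toy values chosen for illustration.

from tokenizers import Tokenizer, trainers
from tokenizers.models import Unigram

tokenizer = Tokenizer(Unigram())
trainer = trainers.UnigramTrainer(vocab_size=30, special_tokens=["<pad>", "</s>", "<unk>"])
tokenizer.train_from_iterator(["hello world", "hello tokenizer"], trainer=trainer)
print(tokenizer.encode("hello world").tokens)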
1
"""simple docstring""" import logging import random import ray from transformers import RagConfig, RagRetriever, RagTokenizer from transformers.models.rag.retrieval_rag import CustomHFIndex UpperCamelCase_ = logging.getLogger(__name__) class snake_case : def __init__( self) ->List[str]: a_ = False def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->List[str]: if not self.initialized: a_ = RagRetriever( __UpperCAmelCase , question_encoder_tokenizer=__UpperCAmelCase , generator_tokenizer=__UpperCAmelCase , index=__UpperCAmelCase , init_retrieval=__UpperCAmelCase , ) a_ = True def UpperCAmelCase__ ( self) ->str: self.retriever.index.init_index() def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->Dict: a_ , a_ = self.retriever._main_retrieve(__UpperCAmelCase , __UpperCAmelCase) return doc_ids, retrieved_doc_embeds class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None) ->str: if index is not None and index.is_initialized() and len(__UpperCAmelCase) > 0: raise ValueError( "When using Ray for distributed fine-tuning, " "you'll need to provide the paths instead, " "as the dataset and the index are loaded " "separately. More info in examples/rag/use_own_knowledge_dataset.py ") super().__init__( __UpperCAmelCase , question_encoder_tokenizer=__UpperCAmelCase , generator_tokenizer=__UpperCAmelCase , index=__UpperCAmelCase , init_retrieval=__UpperCAmelCase , ) a_ = retrieval_workers if len(self.retrieval_workers) > 0: ray.get( [ worker.create_rag_retriever.remote(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) for worker in self.retrieval_workers ]) def UpperCAmelCase__ ( self) ->List[Any]: logger.info("initializing retrieval") if len(self.retrieval_workers) > 0: ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers]) else: # Non-distributed training. Load index into this same process. self.index.init_index() def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->Optional[Any]: if len(self.retrieval_workers) > 0: # Select a random retrieval actor. a_ = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers) - 1)] a_ , a_ = ray.get(random_worker.retrieve.remote(__UpperCAmelCase , __UpperCAmelCase)) else: a_ , a_ = self._main_retrieve(__UpperCAmelCase , __UpperCAmelCase) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__UpperCAmelCase) @classmethod def UpperCAmelCase__ ( cls , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase) ->Dict: return super(__UpperCAmelCase , cls).get_tokenizers(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase) @classmethod def UpperCAmelCase__ ( cls , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase) ->str: a_ = kwargs.pop("config" , __UpperCAmelCase) or RagConfig.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase) a_ = RagTokenizer.from_pretrained(__UpperCAmelCase , config=__UpperCAmelCase) a_ = rag_tokenizer.question_encoder a_ = rag_tokenizer.generator if indexed_dataset is not None: a_ = "custom" a_ = CustomHFIndex(config.retrieval_vector_size , __UpperCAmelCase) else: a_ = cls._build_index(__UpperCAmelCase) return cls( __UpperCAmelCase , question_encoder_tokenizer=__UpperCAmelCase , generator_tokenizer=__UpperCAmelCase , retrieval_workers=__UpperCAmelCase , index=__UpperCAmelCase , )
303
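The retriever above fans retrieval calls out to Ray actors. A generic sketch of that actor pattern; the worker class and its payload are illustrative, not the RAG API.

import ray

ray.init(ignore_reinit_error=True)

@ray.remote
class RetrievalWorker:
    def retrieve(self, question):
        return f"top documents for: {question}"

workers = [RetrievalWorker.remote() for _ in range(2)]
# pick a worker and block on the remote call, as the retriever does with ray.get
print(ray.get(workers[0].retrieve.remote("what is RAG?")))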
"""simple docstring""" from urllib.parse import quote import pytest from datasets.utils.hub import hf_hub_url @pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] ) @pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] ) @pytest.mark.parametrize("revision" , [None, "v2"] ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int: """simple docstring""" a_ = hf_hub_url(repo_id=UpperCAmelCase , path=UpperCAmelCase , revision=UpperCAmelCase ) assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(UpperCAmelCase )}'''
303
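The parametrization on "filename with blanks.csv" exists because hf_hub_url percent-encodes the path component; the core of that behaviour in one line.

from urllib.parse import quote

print(quote("filename with blanks.csv"))  # filename%20with%20blanks.csv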
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { 'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json', 'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json', 'kssteven/ibert-roberta-large-mnli': ( 'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json' ), } class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : Any = """ibert""" def __init__( self , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase="absolute" , __UpperCAmelCase=False , __UpperCAmelCase="none" , **__UpperCAmelCase , ) ->Union[str, Any]: super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase) a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = hidden_act a_ = intermediate_size a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = type_vocab_size a_ = initializer_range a_ = layer_norm_eps a_ = position_embedding_type a_ = quant_mode a_ = force_dequant class snake_case ( SCREAMING_SNAKE_CASE_ ): @property def UpperCAmelCase__ ( self) ->Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": a_ = {0: "batch", 1: "choice", 2: "sequence"} else: a_ = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ])
303
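Configs like the one above are plain attribute containers, so construction and attribute round-trips are direct. A quick sketch using the quantization knobs the class defines; "gelu" is one of the force_dequant options accepted by I-BERT.

from transformers import IBertConfig

config = IBertConfig(quant_mode=True, force_dequant="gelu")
print(config.quant_mode, config.force_dequant, config.hidden_size)  # True gelu 768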
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { 'MIT/ast-finetuned-audioset-10-10-0.4593': ( 'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json' ), } class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : Tuple = """audio-spectrogram-transformer""" def __init__( self , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=10 , __UpperCAmelCase=10_24 , __UpperCAmelCase=1_28 , **__UpperCAmelCase , ) ->str: super().__init__(**__UpperCAmelCase) a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = initializer_range a_ = layer_norm_eps a_ = patch_size a_ = qkv_bias a_ = frequency_stride a_ = time_stride a_ = max_length a_ = num_mel_bins
303
1
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class snake_case : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=50 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=None , ) ->Dict: a_ = parent a_ = batch_size a_ = seq_length a_ = is_training a_ = use_input_mask a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = initializer_range a_ = use_labels a_ = scope def UpperCAmelCase__ ( self) ->Any: a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a_ = None if self.use_input_mask: a_ = random_attention_mask([self.batch_size, self.seq_length]) if self.use_labels: a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a_ = self.get_config() return config, input_ids, input_mask, token_labels def UpperCAmelCase__ ( self) ->Optional[Any]: return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self) ->List[str]: ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) = self.prepare_config_and_inputs() a_ = True a_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->str: a_ = BertGenerationEncoder(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase) a_ = model(__UpperCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->Union[str, Any]: a_ = True a_ = BertGenerationEncoder(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , 
encoder_hidden_states=__UpperCAmelCase , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->List[str]: a_ = True a_ = True a_ = BertGenerationDecoder(config=__UpperCAmelCase).to(__UpperCAmelCase).eval() # first forward pass a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , ) a_ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids a_ = ids_tensor((self.batch_size, 3) , config.vocab_size) a_ = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and a_ = torch.cat([input_ids, next_tokens] , dim=-1) a_ = torch.cat([input_mask, next_mask] , dim=-1) a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0] a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0] # select random slice a_ = ids_tensor((1,) , output_from_past.shape[-1]).item() a_ = output_from_no_past[:, -3:, random_slice_idx].detach() a_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , ) ->Tuple: a_ = BertGenerationDecoder(__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCAmelCase__ ( self) ->str: a_ , a_ , a_ , a_ = self.prepare_config_and_inputs() a_ = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : List[str] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () a_ : Optional[int] = (BertGenerationDecoder,) if is_torch_available() else () a_ : List[Any] = ( {"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder} if is_torch_available() else {} ) def UpperCAmelCase__ ( self) ->List[Any]: a_ = BertGenerationEncoderTester(self) a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37) def UpperCAmelCase__ ( self) ->Optional[Any]: self.config_tester.run_common_tests() def UpperCAmelCase__ ( self) ->Tuple: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Tuple: a_ , a_ , a_ , a_ = self.model_tester.prepare_config_and_inputs() a_ = "bert" self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) def UpperCAmelCase__ ( self) ->int: a_ = 
self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->List[str]: a_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Optional[int]: # This regression test was failing with PyTorch < 1.3 ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() a_ = None self.model_tester.create_and_check_model_as_decoder( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) def UpperCAmelCase__ ( self) ->List[Any]: a_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase) @slow def UpperCAmelCase__ ( self) ->str: a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") self.assertIsNotNone(__UpperCAmelCase) @require_torch class snake_case ( unittest.TestCase ): @slow def UpperCAmelCase__ ( self) ->int: a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]]) with torch.no_grad(): a_ = model(__UpperCAmelCase)[0] a_ = torch.Size([1, 8, 10_24]) self.assertEqual(output.shape , __UpperCAmelCase) a_ = torch.tensor( [[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4)) @require_torch class snake_case ( unittest.TestCase ): @slow def UpperCAmelCase__ ( self) ->List[str]: a_ = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]]) with torch.no_grad(): a_ = model(__UpperCAmelCase)[0] a_ = torch.Size([1, 8, 5_03_58]) self.assertEqual(output.shape , __UpperCAmelCase) a_ = torch.tensor( [[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
303
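A standalone version of the slow encoder integration check above; the checkpoint name, input ids, and expected output shape are taken directly from the row.

import torch
from transformers import BertGenerationEncoder

model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
with torch.no_grad():
    output = model(input_ids)[0]
print(output.shape)  # torch.Size([1, 8, 1024])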
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { 'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json', 'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json', 'xlm-roberta-large-finetuned-conll02-dutch': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json' ), 'xlm-roberta-large-finetuned-conll02-spanish': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json' ), 'xlm-roberta-large-finetuned-conll03-english': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json' ), 'xlm-roberta-large-finetuned-conll03-german': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json' ), } class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : str = """xlm-roberta""" def __init__( self , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) ->Union[str, Any]: super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase) a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = hidden_act a_ = intermediate_size a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = type_vocab_size a_ = initializer_range a_ = layer_norm_eps a_ = position_embedding_type a_ = use_cache a_ = classifier_dropout class snake_case ( SCREAMING_SNAKE_CASE_ ): @property def UpperCAmelCase__ ( self) ->Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": a_ = {0: "batch", 1: "choice", 2: "sequence"} else: a_ = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ])
303
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { 'MIT/ast-finetuned-audioset-10-10-0.4593': ( 'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json' ), } class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : Tuple = """audio-spectrogram-transformer""" def __init__( self , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=10 , __UpperCAmelCase=10_24 , __UpperCAmelCase=1_28 , **__UpperCAmelCase , ) ->str: super().__init__(**__UpperCAmelCase) a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = initializer_range a_ = layer_norm_eps a_ = patch_size a_ = qkv_bias a_ = frequency_stride a_ = time_stride a_ = max_length a_ = num_mel_bins
303
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class snake_case : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=24 , __UpperCAmelCase=2 , __UpperCAmelCase=6 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=10_00 , ) ->List[str]: a_ = parent a_ = batch_size a_ = seq_length a_ = is_training a_ = use_input_mask a_ = use_token_type_ids a_ = use_labels a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = type_vocab_size a_ = type_sequence_label_size a_ = initializer_range a_ = num_labels a_ = scope a_ = range_bbox def UpperCAmelCase__ ( self) ->int: a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: a_ = bbox[i, j, 3] a_ = bbox[i, j, 1] a_ = t if bbox[i, j, 2] < bbox[i, j, 0]: a_ = bbox[i, j, 2] a_ = bbox[i, j, 0] a_ = t a_ = None if self.use_input_mask: a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) a_ = None if self.use_token_type_ids: a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) a_ = None a_ = None if self.use_labels: a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) a_ = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def UpperCAmelCase__ ( self) ->List[str]: return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Any: a_ = LiltModel(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase) a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , 
token_type_ids=__UpperCAmelCase) a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Union[str, Any]: a_ = self.num_labels a_ = LiltForTokenClassification(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model( __UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Dict: a_ = LiltForQuestionAnswering(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model( __UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def UpperCAmelCase__ ( self) ->str: a_ = self.prepare_config_and_inputs() ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) = config_and_inputs a_ = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : List[Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) a_ : List[str] = ( { """feature-extraction""": LiltModel, """question-answering""": LiltForQuestionAnswering, """text-classification""": LiltForSequenceClassification, """token-classification""": LiltForTokenClassification, """zero-shot""": LiltForSequenceClassification, } if is_torch_available() else {} ) a_ : Any = False a_ : Dict = False def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int: return True def UpperCAmelCase__ ( self) ->str: a_ = LiltModelTester(self) a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37) def UpperCAmelCase__ ( self) ->List[Any]: self.config_tester.run_common_tests() def UpperCAmelCase__ ( self) ->Tuple: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Dict: a_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a_ = type self.model_tester.create_and_check_model(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->List[str]: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->str: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase) @slow def UpperCAmelCase__ ( self) ->List[Any]: for model_name in 
LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ = LiltModel.from_pretrained(__UpperCAmelCase) self.assertIsNotNone(__UpperCAmelCase) @require_torch @slow class snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self) ->List[Any]: a_ = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(__UpperCAmelCase) a_ = torch.tensor([[1, 2]] , device=__UpperCAmelCase) a_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__UpperCAmelCase) # forward pass with torch.no_grad(): a_ = model(input_ids=__UpperCAmelCase , bbox=__UpperCAmelCase) a_ = torch.Size([1, 2, 7_68]) a_ = torch.tensor( [[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=__UpperCAmelCase , ) self.assertEqual(outputs.last_hidden_state.shape , __UpperCAmelCase) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __UpperCAmelCase , atol=1E-3))
303
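The tester above repairs randomly drawn boxes with elementwise swaps so that x0 <= x1 and y0 <= y1. A vectorised sketch of the same normalisation; the batch and sequence sizes are toy values.

import torch

bbox = torch.randint(0, 1000, (2, 4, 4))  # (batch, seq_len, 4) as in the tester
x0 = torch.minimum(bbox[..., 0], bbox[..., 2])
x1 = torch.maximum(bbox[..., 0], bbox[..., 2])
y0 = torch.minimum(bbox[..., 1], bbox[..., 3])
y1 = torch.maximum(bbox[..., 1], bbox[..., 3])
bbox = torch.stack([x0, y0, x1, y1], dim=-1)
assert bool(((bbox[..., 2] >= bbox[..., 0]) & (bbox[..., 3] >= bbox[..., 1])).all())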
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger(__name__) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False ) ->List[Any]: """simple docstring""" a_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "deit.embeddings.cls_token"), ("dist_token", "deit.embeddings.distillation_token"), ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "deit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" a_ = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("norm.weight", "deit.layernorm.weight"), ("norm.bias", "deit.layernorm.bias"), ("head.weight", "cls_classifier.weight"), ("head.bias", "cls_classifier.bias"), ("head_dist.weight", "distillation_classifier.weight"), ("head_dist.bias", "distillation_classifier.bias"), ] ) return rename_keys def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) ->Optional[Any]: """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: a_ = "" else: a_ = "deit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) a_ = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' ) a_ = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict a_ = in_proj_weight[ : config.hidden_size, : ] a_ = in_proj_bias[: config.hidden_size] a_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] a_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] a_ = in_proj_weight[ -config.hidden_size :, : ] a_ = in_proj_bias[-config.hidden_size :] def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]: """simple docstring""" a_ = dct.pop(UpperCAmelCase ) a_ = val def UpperCamelCase ( ) ->str: """simple docstring""" a_ = "http://images.cocodataset.org/val2017/000000039769.jpg" a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) return im @torch.no_grad() def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->List[Any]: """simple docstring""" a_ = DeiTConfig() # all deit models have fine-tuned heads a_ = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size a_ = 1_000 a_ = "huggingface/label-files" a_ = "imagenet-1k-id2label.json" a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) ) a_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()} a_ = idalabel a_ = {v: k for k, v in idalabel.items()} a_ = int(deit_name[-6:-4] ) a_ = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("tiny" ): a_ = 192 a_ = 768 a_ = 12 a_ = 3 elif deit_name[9:].startswith("small" ): a_ = 384 a_ = 1_536 a_ = 12 a_ = 6 if deit_name[9:].startswith("base" ): pass elif deit_name[4:].startswith("large" ): a_ = 1_024 a_ = 4_096 a_ = 24 a_ = 16 # load original model from timm a_ = timm.create_model(UpperCAmelCase , pretrained=UpperCAmelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys a_ = timm_model.state_dict() a_ = create_rename_keys(UpperCAmelCase , UpperCAmelCase ) for src, dest in rename_keys: rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) read_in_q_k_v(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # load HuggingFace model a_ = DeiTForImageClassificationWithTeacher(UpperCAmelCase ).eval() model.load_state_dict(UpperCAmelCase ) # Check outputs on an image, prepared by DeiTImageProcessor a_ = int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 
224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 a_ = DeiTImageProcessor(size=UpperCAmelCase , crop_size=config.image_size ) a_ = image_processor(images=prepare_img() , return_tensors="pt" ) a_ = encoding["pixel_values"] a_ = model(UpperCAmelCase ) a_ = timm_model(UpperCAmelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(UpperCAmelCase , outputs.logits , atol=1E-3 ) Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase ) print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCAmelCase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(UpperCAmelCase ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--deit_name', default='vit_deit_base_distilled_patch16_224', type=str, help='Name of the DeiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) UpperCamelCase_ = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
303
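The trickiest step in the conversion script above is the attention projection: timm stores query, key, and value as one fused qkv matrix of shape (3 * hidden_size, hidden_size), while the HF checkpoint keeps the three separate, which is exactly what the read_in_q_k_v helper's slices accomplish. A minimal sketch of that split, with an illustrative hidden size (not one of the real DeiT dimensions):

import torch

hidden_size = 8  # illustrative only; DeiT configs use 192/384/768/1024
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused qkv weight
in_proj_bias = torch.randn(3 * hidden_size)

query_w = in_proj_weight[:hidden_size, :]
key_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
value_w = in_proj_weight[-hidden_size:, :]
query_b, key_b, value_b = in_proj_bias.chunk(3)

# the three slices tile the fused tensors exactly
assert torch.equal(torch.cat([query_w, key_w, value_w]), in_proj_weight)
assert torch.equal(torch.cat([query_b, key_b, value_b]), in_proj_bias)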
"""simple docstring""" from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case ( SCREAMING_SNAKE_CASE_ ): def UpperCAmelCase__ ( self) ->Any: a_ = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(__UpperCAmelCase , "embed_dim")) self.parent.assertTrue(hasattr(__UpperCAmelCase , "num_heads")) class snake_case : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=64 , __UpperCAmelCase=3 , __UpperCAmelCase=[16, 48, 96] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=2 , ) ->Optional[int]: a_ = parent a_ = batch_size a_ = image_size a_ = patch_sizes a_ = patch_stride a_ = patch_padding a_ = is_training a_ = use_labels a_ = num_labels a_ = num_channels a_ = embed_dim a_ = num_heads a_ = stride_kv a_ = depth a_ = cls_token a_ = attention_drop_rate a_ = initializer_range a_ = layer_norm_eps def UpperCAmelCase__ ( self) ->Any: a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) a_ = None if self.use_labels: # create a random int32 tensor of given shape a_ = ids_tensor([self.batch_size] , self.num_labels) a_ = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self) ->Union[str, Any]: return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[Any]: a_ = TFCvtModel(config=__UpperCAmelCase) a_ = model(__UpperCAmelCase , training=__UpperCAmelCase) a_ = (self.image_size, self.image_size) a_ , a_ = image_size[0], image_size[1] for i in range(len(self.depth)): a_ = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1) a_ = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str: a_ = self.num_labels a_ = TFCvtForImageClassification(__UpperCAmelCase) a_ = model(__UpperCAmelCase , labels=__UpperCAmelCase , 
training=__UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def UpperCAmelCase__ ( self) ->Tuple: a_ = self.prepare_config_and_inputs() a_ , a_ , a_ = config_and_inputs a_ = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : Union[str, Any] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () a_ : List[Any] = ( {"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification} if is_tf_available() else {} ) a_ : Any = False a_ : Dict = False a_ : Optional[int] = False a_ : List[Any] = False a_ : List[Any] = False def UpperCAmelCase__ ( self) ->List[str]: a_ = TFCvtModelTester(self) a_ = TFCvtConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37) def UpperCAmelCase__ ( self) ->List[str]: self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason="Cvt does not output attentions") def UpperCAmelCase__ ( self) ->Dict: pass @unittest.skip(reason="Cvt does not use inputs_embeds") def UpperCAmelCase__ ( self) ->List[str]: pass @unittest.skip(reason="Cvt does not support input and output embeddings") def UpperCAmelCase__ ( self) ->Optional[Any]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) def UpperCAmelCase__ ( self) ->Dict: super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." 
, ) @slow def UpperCAmelCase__ ( self) ->List[str]: super().test_keras_fit() @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8") def UpperCAmelCase__ ( self) ->Dict: a_ = tf.keras.mixed_precision.Policy("mixed_float16") tf.keras.mixed_precision.set_global_policy(__UpperCAmelCase) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy("float32") def UpperCAmelCase__ ( self) ->Optional[int]: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = model_class(__UpperCAmelCase) a_ = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic a_ = [*signature.parameters.keys()] a_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCAmelCase) def UpperCAmelCase__ ( self) ->Optional[int]: def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase): a_ = model_class(__UpperCAmelCase) a_ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase)) a_ = outputs.hidden_states a_ = len(self.model_tester.depth) self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:]) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a_ = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) def UpperCAmelCase__ ( self) ->Dict: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->List[str]: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase) @slow def UpperCAmelCase__ ( self) ->str: for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ = TFCvtModel.from_pretrained(__UpperCAmelCase) self.assertIsNotNone(__UpperCAmelCase) def UpperCamelCase ( ) ->Dict: """simple docstring""" a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class snake_case ( unittest.TestCase ): @cached_property def UpperCAmelCase__ ( self) ->int: return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) @slow def UpperCAmelCase__ ( self) ->Any: a_ = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) a_ = self.default_image_processor a_ = prepare_img() a_ = image_processor(images=__UpperCAmelCase , return_tensors="tf") # forward pass a_ = model(**__UpperCAmelCase) # verify the logits a_ = tf.TensorShape((1, 10_00)) self.assertEqual(outputs.logits.shape , __UpperCAmelCase) a_ = tf.constant([0.9_285, 0.9_015, -0.3_150]) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __UpperCAmelCase , atol=1E-4))
303
1
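The shape assertion in the CvT model check above is plain strided-convolution arithmetic: each stage is a convolutional patch embedding, so the spatial size shrinks per stage as floor((dim + 2 * padding - kernel) / stride) + 1. A standalone check, assuming the CvT-13-style stage settings this tester appears to use (kernel/stride/padding of 7/4/2, then 3/2/1 twice):

from math import floor

def conv_out(dim: int, kernel: int, stride: int, padding: int) -> int:
    return floor((dim + 2 * padding - kernel) / stride) + 1

size = 64  # the tester's image_size
for kernel, stride, padding in [(7, 4, 2), (3, 2, 1), (3, 2, 1)]:
    size = conv_out(size, kernel, stride, padding)
    print(size)  # 16, then 8, then 4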
"""simple docstring""" import os from argparse import ArgumentParser, Namespace from ..data import SingleSentenceClassificationProcessor as Processor from ..pipelines import TextClassificationPipeline from ..utils import is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand if not is_tf_available() and not is_torch_available(): raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training') # TF training parameters UpperCamelCase_ = False UpperCamelCase_ = False def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" return TrainCommand(UpperCAmelCase ) class snake_case ( SCREAMING_SNAKE_CASE_ ): @staticmethod def UpperCAmelCase__ ( __UpperCAmelCase) ->Union[str, Any]: a_ = parser.add_parser("train" , help="CLI tool to train a model on a task.") train_parser.add_argument( "--train_data" , type=__UpperCAmelCase , required=__UpperCAmelCase , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , ) train_parser.add_argument( "--column_label" , type=__UpperCAmelCase , default=0 , help="Column of the dataset csv file with example labels.") train_parser.add_argument( "--column_text" , type=__UpperCAmelCase , default=1 , help="Column of the dataset csv file with example texts.") train_parser.add_argument( "--column_id" , type=__UpperCAmelCase , default=2 , help="Column of the dataset csv file with example ids.") train_parser.add_argument( "--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers).") train_parser.add_argument("--validation_data" , type=__UpperCAmelCase , default="" , help="path to validation dataset.") train_parser.add_argument( "--validation_split" , type=__UpperCAmelCase , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." 
, ) train_parser.add_argument("--output" , type=__UpperCAmelCase , default="./" , help="path to saved the trained model.") train_parser.add_argument( "--task" , type=__UpperCAmelCase , default="text_classification" , help="Task to train the model on.") train_parser.add_argument( "--model" , type=__UpperCAmelCase , default="bert-base-uncased" , help="Model's name or path to stored model.") train_parser.add_argument("--train_batch_size" , type=__UpperCAmelCase , default=32 , help="Batch size for training.") train_parser.add_argument("--valid_batch_size" , type=__UpperCAmelCase , default=64 , help="Batch size for validation.") train_parser.add_argument("--learning_rate" , type=__UpperCAmelCase , default=3E-5 , help="Learning rate.") train_parser.add_argument("--adam_epsilon" , type=__UpperCAmelCase , default=1E-08 , help="Epsilon for Adam optimizer.") train_parser.set_defaults(func=__UpperCAmelCase) def __init__( self , __UpperCAmelCase) ->List[Any]: a_ = logging.get_logger("transformers-cli/training") a_ = "tf" if is_tf_available() else "torch" os.makedirs(args.output , exist_ok=__UpperCAmelCase) a_ = args.output a_ = args.column_label a_ = args.column_text a_ = args.column_id self.logger.info(F'''Loading {args.task} pipeline for {args.model}''') if args.task == "text_classification": a_ = TextClassificationPipeline.from_pretrained(args.model) elif args.task == "token_classification": raise NotImplementedError elif args.task == "question_answering": raise NotImplementedError self.logger.info(F'''Loading dataset from {args.train_data}''') a_ = Processor.create_from_csv( args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) a_ = None if args.validation_data: self.logger.info(F'''Loading validation dataset from {args.validation_data}''') a_ = Processor.create_from_csv( args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) a_ = args.validation_split a_ = args.train_batch_size a_ = args.valid_batch_size a_ = args.learning_rate a_ = args.adam_epsilon def UpperCAmelCase__ ( self) ->Optional[Any]: if self.framework == "tf": return self.run_tf() return self.run_torch() def UpperCAmelCase__ ( self) ->Union[str, Any]: raise NotImplementedError def UpperCAmelCase__ ( self) ->Any: self.pipeline.fit( self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , ) # Save trained pipeline self.pipeline.save_pretrained(self.output)
303
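The set_defaults(func=...) wiring above is the standard argparse dispatch pattern: each subcommand stores a callable on the parsed namespace, and the program entry point simply invokes args.func(args). A self-contained sketch of the pattern (the names here are invented for illustration, not the transformers CLI):

from argparse import ArgumentParser

def train_command_factory(args):
    print(f"would train on {args.train_data}")

parser = ArgumentParser()
subparsers = parser.add_subparsers()
train_parser = subparsers.add_parser("train")
train_parser.add_argument("--train_data", default="data.csv")
train_parser.set_defaults(func=train_command_factory)

args = parser.parse_args(["train", "--train_data", "my.csv"])
args.func(args)  # -> would train on my.csv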
"""simple docstring""" import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : Dict = """Speech2TextFeatureExtractor""" a_ : str = """Speech2TextTokenizer""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase) ->List[str]: super().__init__(__UpperCAmelCase , __UpperCAmelCase) a_ = self.feature_extractor a_ = False def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->Optional[int]: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.") a_ = kwargs.pop("raw_speech") else: a_ = kwargs.pop("audio" , __UpperCAmelCase) a_ = kwargs.pop("sampling_rate" , __UpperCAmelCase) a_ = kwargs.pop("text" , __UpperCAmelCase) if len(__UpperCAmelCase) > 0: a_ = args[0] a_ = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process.") if audio is not None: a_ = self.feature_extractor(__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase) if text is not None: a_ = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase) if text is None: return inputs elif audio is None: return encodings else: a_ = encodings["input_ids"] return inputs def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->str: return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase) def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->int: return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase) @contextmanager def UpperCAmelCase__ ( self) ->Tuple: warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call.") a_ = True a_ = self.tokenizer yield a_ = self.feature_extractor a_ = False
303
1
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self) ->List[str]: # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. a_ = [[1, 2, 4], [1, 2, 3, 4]] a_ = DisjunctiveConstraint(__UpperCAmelCase) self.assertTrue(isinstance(dc.token_ids , __UpperCAmelCase)) with self.assertRaises(__UpperCAmelCase): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]])) with self.assertRaises(__UpperCAmelCase): DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])]) def UpperCAmelCase__ ( self) ->List[str]: # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). a_ = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__UpperCAmelCase): DisjunctiveConstraint(__UpperCAmelCase) # fails here def UpperCAmelCase__ ( self) ->Any: a_ = [[1, 2, 3], [1, 2, 4]] a_ = DisjunctiveConstraint(__UpperCAmelCase) a_ , a_ , a_ = dc.update(1) a_ = stepped is True and completed is False and reset is False self.assertTrue(__UpperCAmelCase) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1]) a_ , a_ , a_ = dc.update(2) a_ = stepped is True and completed is False and reset is False self.assertTrue(__UpperCAmelCase) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2]) a_ , a_ , a_ = dc.update(3) a_ = stepped is True and completed is True and reset is False self.assertTrue(__UpperCAmelCase) self.assertTrue(dc.completed) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3]) def UpperCAmelCase__ ( self) ->Optional[int]: a_ = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] a_ = DisjunctiveConstraint(__UpperCAmelCase) a_ , a_ , a_ = dc.update(1) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1]) a_ , a_ , a_ = dc.update(2) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2]) a_ , a_ , a_ = dc.update(4) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2, 4]) a_ , a_ , a_ = dc.update(5) self.assertTrue(dc.completed) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5]) dc.reset() a_ , a_ , a_ = dc.update(1) self.assertTrue(not dc.completed) self.assertTrue(dc.remaining() == 3) self.assertTrue(dc.current_seq == [1]) a_ , a_ , a_ = dc.update(2) self.assertTrue(not dc.completed) self.assertTrue(dc.remaining() == 2) self.assertTrue(dc.current_seq == [1, 2]) a_ , a_ , a_ = dc.update(5) self.assertTrue(dc.completed) # Completed! self.assertTrue(dc.remaining() == 0) self.assertTrue(dc.current_seq == [1, 2, 5])
303
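Conceptually, the constraint exercised by these tests keeps a set of live alternatives and advances whichever ones still match the generated prefix, reporting completion as soon as one alternative is fully consumed. A dependency-free sketch of that bookkeeping (the real DisjunctiveConstraint is trie-based; advance is a name invented here):

def advance(candidates, generated):
    # keep only the alternatives whose prefix still matches what was generated
    live = [c for c in candidates if c[: len(generated)] == generated]
    completed = generated in live
    return live, completed

candidates = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
generated = []
for token in (1, 2, 5):
    generated.append(token)
    candidates, completed = advance(candidates, generated)
print(completed)  # True: the alternative [1, 2, 5] has been fulfilled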
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase_ = { 'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ 'LILT_PRETRAINED_MODEL_ARCHIVE_LIST', 'LiltForQuestionAnswering', 'LiltForSequenceClassification', 'LiltForTokenClassification', 'LiltModel', 'LiltPreTrainedModel', ] if TYPE_CHECKING: from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lilt import ( LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, LiltPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
303
1
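This import-structure layout (it recurs below for MCTCT, SwiftFormer, and OwlViT) exists so that heavy, torch-dependent symbols are imported only on first attribute access; _LazyModule swaps in a custom module object to achieve that. A toy equivalent using PEP 562 module-level __getattr__, assuming the code lives in its own module file, with stdlib modules standing in for the real submodules:

# lazy_mod.py -- a simplified stand-in for _LazyModule, not its actual implementation
import importlib

_import_structure = {"json": ["dumps"], "math": ["sqrt"]}

def __getattr__(name):  # PEP 562: called only for attributes not found normally
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")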
"""simple docstring""" import unittest import numpy as np import torch from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class snake_case ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : Dict = DDIMPipeline a_ : Union[str, Any] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS a_ : Tuple = PipelineTesterMixin.required_optional_params - { """num_images_per_prompt""", """latents""", """callback""", """callback_steps""", } a_ : Dict = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS a_ : Union[str, Any] = False def UpperCAmelCase__ ( self) ->List[str]: torch.manual_seed(0) a_ = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) a_ = DDIMScheduler() a_ = {"unet": unet, "scheduler": scheduler} return components def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase=0) ->str: if str(__UpperCAmelCase).startswith("mps"): a_ = torch.manual_seed(__UpperCAmelCase) else: a_ = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) a_ = { "batch_size": 1, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def UpperCAmelCase__ ( self) ->Optional[int]: a_ = "cpu" a_ = self.get_dummy_components() a_ = self.pipeline_class(**__UpperCAmelCase) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) a_ = self.get_dummy_inputs(__UpperCAmelCase) a_ = pipe(**__UpperCAmelCase).images a_ = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 32, 32, 3)) a_ = np.array( [1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04]) a_ = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(__UpperCAmelCase , 1E-3) def UpperCAmelCase__ ( self) ->str: super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3) def UpperCAmelCase__ ( self) ->Dict: super().test_save_load_local(expected_max_difference=3E-3) def UpperCAmelCase__ ( self) ->List[Any]: super().test_save_load_optional_components(expected_max_difference=3E-3) def UpperCAmelCase__ ( self) ->Union[str, Any]: super().test_inference_batch_single_identical(expected_max_diff=3E-3) @slow @require_torch_gpu class snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self) ->Tuple: a_ = "google/ddpm-cifar10-32" a_ = UNetaDModel.from_pretrained(__UpperCAmelCase) a_ = DDIMScheduler() a_ = DDIMPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase) ddim.to(__UpperCAmelCase) ddim.set_progress_bar_config(disable=__UpperCAmelCase) a_ = torch.manual_seed(0) a_ = ddim(generator=__UpperCAmelCase , eta=0.0 , output_type="numpy").images a_ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) a_ = np.array([0.1_723, 0.1_617, 0.1_600, 0.1_626, 0.1_497, 0.1_513, 0.1_505, 0.1_442, 0.1_453]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 def UpperCAmelCase__ ( self) ->Dict: a_ = "google/ddpm-ema-bedroom-256" a_ = UNetaDModel.from_pretrained(__UpperCAmelCase) a_ = DDIMScheduler.from_pretrained(__UpperCAmelCase) a_ = DDIMPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase) ddpm.to(__UpperCAmelCase) 
ddpm.set_progress_bar_config(disable=__UpperCAmelCase) a_ = torch.manual_seed(0) a_ = ddpm(generator=__UpperCAmelCase , output_type="numpy").images a_ = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) a_ = np.array([0.0_060, 0.0_201, 0.0_344, 0.0_024, 0.0_018, 0.0_002, 0.0_022, 0.0_000, 0.0_069]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
303
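The hard-coded expected_slice values in these pipeline tests are only meaningful because sampling is reproducible: identically seeded generators yield identical noise, so outputs are stable up to numerical tolerance. The underlying guarantee, in isolation:

import torch

g1 = torch.Generator().manual_seed(0)
g2 = torch.Generator().manual_seed(0)
assert torch.equal(torch.randn(4, generator=g1), torch.randn(4, generator=g2))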
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase_ = { 'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'], 'feature_extraction_mctct': ['MCTCTFeatureExtractor'], 'processing_mctct': ['MCTCTProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ 'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST', 'MCTCTForCTC', 'MCTCTModel', 'MCTCTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
303
1
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class snake_case ( unittest.TestCase ): @slow def UpperCAmelCase__ ( self) ->int: a_ = TFCamembertModel.from_pretrained("jplu/tf-camembert-base") a_ = tf.convert_to_tensor( [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" a_ = model(__UpperCAmelCase)["last_hidden_state"] a_ = tf.TensorShape((1, 10, 7_68)) self.assertEqual(output.shape , __UpperCAmelCase) # compare the actual values for a slice. a_ = tf.convert_to_tensor( [[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4))
303
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) UpperCamelCase_ = { 'configuration_swiftformer': [ 'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwiftFormerConfig', 'SwiftFormerOnnxConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ 'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'SwiftFormerForImageClassification', 'SwiftFormerModel', 'SwiftFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
303
1
"""simple docstring""" from torch import nn def UpperCamelCase ( UpperCAmelCase ) ->List[Any]: """simple docstring""" if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F'''Unsupported activation function: {act_fn}''' )
303
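A table-driven variant of the factory above, shown as a sketch (get_act and _ACTIVATIONS are names invented here): a registry dict makes adding an activation a one-line change instead of another elif branch.

from torch import nn

_ACTIVATIONS = {"swish": nn.SiLU, "silu": nn.SiLU, "mish": nn.Mish, "gelu": nn.GELU}

def get_act(name: str) -> nn.Module:
    try:
        return _ACTIVATIONS[name]()  # instantiate a fresh module per call
    except KeyError:
        raise ValueError(f"Unsupported activation function: {name}") from None

print(get_act("silu"))  # SiLU()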
"""simple docstring""" # Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]: """simple docstring""" a_ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, nicht wahr?", } # BLUE scores as follows: # "pair": [fairseq, transformers] a_ = { "wmt16-en-de-dist-12-1": [28.3, 27.52], "wmt16-en-de-dist-6-1": [27.4, 27.11], "wmt16-en-de-12-1": [26.9, 25.75], } a_ = F'''{src_lang}-{tgt_lang}''' a_ = F''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt16 - allenai license: apache-2.0 datasets: - wmt16 metrics: - bleu --- # FSMT ## Model description This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}. For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369). All 3 models are available: * [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1) * [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1) * [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "allenai/{model_name}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias ## Training data Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369). ## Eval results Here are the BLEU scores: model | fairseq | transformers -------|---------|---------- {model_name} | {scores[model_name][0]} | {scores[model_name][1]} The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs. The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=5 mkdir -p $DATA_DIR sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` ## Data Sources - [training, etc.](http://www.statmt.org/wmt16/) - [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372) ### BibTeX entry and citation info ``` @misc{{kasai2020deep, title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}}, author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. 
Smith}}, year={{2020}}, eprint={{2006.10369}}, archivePrefix={{arXiv}}, primaryClass={{cs.CL}} }} ``` ''' model_card_dir.mkdir(parents=UpperCAmelCase , exist_ok=UpperCAmelCase ) a_ = os.path.join(UpperCAmelCase , "README.md" ) print(F'''Generating {path}''' ) with open(UpperCAmelCase , "w" , encoding="utf-8" ) as f: f.write(UpperCAmelCase ) # make sure we are under the root of the project UpperCamelCase_ = Path(__file__).resolve().parent.parent.parent UpperCamelCase_ = repo_dir / 'model_cards' for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: UpperCamelCase_ = model_cards_dir / 'allenai' / model_name write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
303
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) UpperCamelCase_ = { 'configuration_owlvit': [ 'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OwlViTConfig', 'OwlViTOnnxConfig', 'OwlViTTextConfig', 'OwlViTVisionConfig', ], 'processing_owlvit': ['OwlViTProcessor'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ['OwlViTFeatureExtractor'] UpperCamelCase_ = ['OwlViTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ 'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'OwlViTModel', 'OwlViTPreTrainedModel', 'OwlViTTextModel', 'OwlViTVisionModel', 'OwlViTForObjectDetection', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
303
"""simple docstring""" def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int: """simple docstring""" if n == 0: return 1 elif n % 2 == 1: return (binary_exponentiation(UpperCAmelCase , n - 1 , UpperCAmelCase ) * a) % mod else: a_ = binary_exponentiation(UpperCAmelCase , n / 2 , UpperCAmelCase ) return (b * b) % mod # a prime number UpperCamelCase_ = 701 UpperCamelCase_ = 1000000000 UpperCamelCase_ = 10 # using binary exponentiation function, O(log(p)): print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p) print((a / b) % p == (a * b ** (p - 2)) % p)
303
1
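The division trick above works because, for prime p, Fermat's little theorem says b ** (p - 2) mod p is the modular inverse of b, so multiplying by binary_exponentiation(b, p - 2, p) is division by b. The same exponentiation can also be written iteratively over the bits of the exponent; a small sketch (mod_pow is a name invented here), cross-checked against Python's built-in three-argument pow:

def mod_pow(base: int, exp: int, mod: int) -> int:
    result = 1
    base %= mod
    while exp:
        if exp & 1:  # odd exponent: fold one factor of base into the result
            result = result * base % mod
        base = base * base % mod  # square
        exp >>= 1  # halve the exponent
    return result

p = 701
assert mod_pow(10, p - 2, p) == pow(10, p - 2, p)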
"""simple docstring""" import math import qiskit def UpperCamelCase ( UpperCAmelCase = 1 , UpperCAmelCase = 1 , UpperCAmelCase = 1 ) ->qiskit.result.counts.Counts: """simple docstring""" if ( isinstance(UpperCAmelCase , UpperCAmelCase ) or isinstance(UpperCAmelCase , UpperCAmelCase ) or isinstance(UpperCAmelCase , UpperCAmelCase ) ): raise TypeError("inputs must be integers." ) if (input_a < 0) or (input_a < 0) or (carry_in < 0): raise ValueError("inputs must be positive." ) if ( (math.floor(UpperCAmelCase ) != input_a) or (math.floor(UpperCAmelCase ) != input_a) or (math.floor(UpperCAmelCase ) != carry_in) ): raise ValueError("inputs must be exact integers." ) if (input_a > 2) or (input_a > 2) or (carry_in > 2): raise ValueError("inputs must be less or equal to 2." ) # build registers a_ = qiskit.QuantumRegister(4 , "qr" ) a_ = qiskit.ClassicalRegister(2 , "cr" ) # list the entries a_ = [input_a, input_a, carry_in] a_ = qiskit.QuantumCircuit(UpperCAmelCase , UpperCAmelCase ) for i in range(0 , 3 ): if entry[i] == 2: quantum_circuit.h(UpperCAmelCase ) # for hadamard entries elif entry[i] == 1: quantum_circuit.x(UpperCAmelCase ) # for 1 entries elif entry[i] == 0: quantum_circuit.i(UpperCAmelCase ) # for 0 entries # build the circuit quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate quantum_circuit.cx(0 , 1 ) quantum_circuit.ccx(1 , 2 , 3 ) quantum_circuit.cx(1 , 2 ) quantum_circuit.cx(0 , 1 ) quantum_circuit.measure([2, 3] , UpperCAmelCase ) # measure the last two qbits a_ = qiskit.Aer.get_backend("aer_simulator" ) a_ = qiskit.execute(UpperCAmelCase , UpperCAmelCase , shots=1_000 ) return job.result().get_counts(UpperCAmelCase ) if __name__ == "__main__": print(F"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
303
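A classical cross-check for the circuit above: a full adder maps (a, b, carry_in) to (sum, carry_out) with sum = a XOR b XOR c_in and carry_out = majority(a, b, c_in). For definite 0/1 inputs (no Hadamard superpositions), the measured counts should concentrate on exactly these values:

for a in (0, 1):
    for b in (0, 1):
        for c_in in (0, 1):
            s = a ^ b ^ c_in
            c_out = (a & b) | (c_in & (a ^ b))
            print(f"a={a} b={b} c_in={c_in} -> sum={s} carry={c_out}")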
"""simple docstring""" import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor UpperCamelCase_ = logging.get_logger(__name__) class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->None: warnings.warn( "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use YolosImageProcessor instead." , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase)
303
1
"""simple docstring""" import os import re import shutil import sys import tempfile import unittest import black UpperCamelCase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated. UpperCamelCase_ = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n' class snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self) ->Optional[int]: a_ = tempfile.mkdtemp() os.makedirs(os.path.join(self.transformer_dir , "models/bert/")) a_ = self.transformer_dir shutil.copy( os.path.join(__UpperCAmelCase , "src/transformers/models/bert/modeling_bert.py") , os.path.join(self.transformer_dir , "models/bert/modeling_bert.py") , ) def UpperCAmelCase__ ( self) ->int: a_ = "src/transformers" shutil.rmtree(self.transformer_dir) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None) ->Dict: a_ = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code if overwrite_result is not None: a_ = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result a_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19) a_ = black.format_str(__UpperCAmelCase , mode=__UpperCAmelCase) a_ = os.path.join(self.transformer_dir , "new_code.py") with open(__UpperCAmelCase , "w" , newline="\n") as f: f.write(__UpperCAmelCase) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(__UpperCAmelCase)) == 0) else: check_copies.is_copy_consistent(f.name , overwrite=__UpperCAmelCase) with open(__UpperCAmelCase , "r") as f: self.assertTrue(f.read() , __UpperCAmelCase) def UpperCAmelCase__ ( self) ->List[Any]: a_ = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead") self.assertEqual(__UpperCAmelCase , __UpperCAmelCase) def UpperCAmelCase__ ( self) ->str: # Base copy consistency self.check_copy_consistency( "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , REFERENCE_CODE + "\n" , ) # With no empty line at the end self.check_copy_consistency( "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , __UpperCAmelCase , ) # Copy consistency with rename self.check_copy_consistency( "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , re.sub("Bert" , "TestModel" , __UpperCAmelCase) , ) # Copy consistency with a really long name a_ = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason" self.check_copy_consistency( F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , 
F'''{long_class_name}LMPredictionHead''' , re.sub("Bert" , __UpperCAmelCase , __UpperCAmelCase) , ) # Copy consistency with overwrite self.check_copy_consistency( "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , __UpperCAmelCase , overwrite_result=re.sub("Bert" , "TestModel" , __UpperCAmelCase) , ) def UpperCAmelCase__ ( self) ->int: a_ = check_copies.LOCALIZED_READMES["README_zh-hans.md"] a_ = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the" " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for" " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong" " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1." " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace)," " released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and" " lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same" " method has been applied to compress GPT2 into" " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into" " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation)," " Multilingual BERT into" " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German" " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**" " (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders" " as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang" " Luong, Quoc V. Le, Christopher D. Manning." ) a_ = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the" " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n" ) a_ = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the" " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1." " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文" " [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and" " lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same" " method has been applied to compress GPT2 into" " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into" " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation)," " Multilingual BERT into" " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German" " version of DistilBERT.\n1. 
**[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自" " Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather" " than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le," " Christopher D. Manning 发布。\n" ) a_ , a_ = check_copies.convert_to_localized_md( __UpperCAmelCase , __UpperCAmelCase , localized_readme["format_model_list"]) self.assertFalse(__UpperCAmelCase) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase) a_ , a_ = check_copies.convert_to_localized_md( __UpperCAmelCase , __UpperCAmelCase , localized_readme["format_model_list"]) # Check whether the number of models is equal to README.md after conversion. self.assertTrue(__UpperCAmelCase) a_ = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the" " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for" " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong" " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut." ) a_ = ( "1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and" " the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n" ) a_ = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the" " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n" ) a_ , a_ = check_copies.convert_to_localized_md( __UpperCAmelCase , __UpperCAmelCase , localized_readme["format_model_list"]) # Check if the model link is synchronized. self.assertEqual(__UpperCAmelCase , __UpperCAmelCase)
303
"""simple docstring""" import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self) ->Dict: a_ = inspect.getfile(accelerate.test_utils) a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"]) a_ = os.path.sep.join( mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]) a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"]) @require_multi_gpu def UpperCAmelCase__ ( self) ->Any: print(F'''Found {torch.cuda.device_count()} devices.''') a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path] with patch_environment(omp_num_threads=1): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy()) @require_multi_gpu def UpperCAmelCase__ ( self) ->str: print(F'''Found {torch.cuda.device_count()} devices.''') a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path] print(F'''Command: {cmd}''') with patch_environment(omp_num_threads=1): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy()) @require_multi_gpu def UpperCAmelCase__ ( self) ->Optional[int]: a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)] with patch_environment(omp_num_threads=1): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy()) @require_multi_gpu def UpperCAmelCase__ ( self) ->List[Any]: print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''') a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1"): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy()) if __name__ == "__main__": UpperCamelCase_ = Accelerator() UpperCamelCase_ = (accelerator.state.process_index + 2, 10) UpperCamelCase_ = torch.randint(0, 10, shape).to(accelerator.device) UpperCamelCase_ = '' UpperCamelCase_ = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." UpperCamelCase_ = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." UpperCamelCase_ = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
303
1
"""simple docstring""" from __future__ import annotations UpperCamelCase_ = [True] * 1000001 UpperCamelCase_ = 2 while i * i <= 1000000: if seive[i]: for j in range(i * i, 1000001, i): UpperCamelCase_ = False i += 1 def UpperCamelCase ( UpperCAmelCase ) ->bool: """simple docstring""" return seive[n] def UpperCamelCase ( UpperCAmelCase ) ->bool: """simple docstring""" return any(digit in "02468" for digit in str(UpperCAmelCase ) ) def UpperCamelCase ( UpperCAmelCase = 1_000_000 ) ->list[int]: """simple docstring""" a_ = [2] # result already includes the number 2. for num in range(3 , limit + 1 , 2 ): if is_prime(UpperCAmelCase ) and not contains_an_even_digit(UpperCAmelCase ): a_ = str(UpperCAmelCase ) a_ = [int(str_num[j:] + str_num[:j] ) for j in range(len(UpperCAmelCase ) )] if all(is_prime(UpperCAmelCase ) for i in list_nums ): result.append(UpperCAmelCase ) return result def UpperCamelCase ( ) ->int: """simple docstring""" return len(find_circular_primes() ) if __name__ == "__main__": print(F"""{len(find_circular_primes()) = }""")
303
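A worked instance of the rotation test inside find_circular_primes above: every left rotation of the digit string must itself be prime, e.g. 197 -> 971 -> 719.

def rotations(n: int) -> list[int]:
    s = str(n)
    return [int(s[i:] + s[:i]) for i in range(len(s))]

print(rotations(197))  # [197, 971, 719] -- all prime, so 197 is circular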
"""simple docstring""" from heapq import heappop, heappush import numpy as np def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) ->tuple[float | int, list[tuple[int, int]]]: """simple docstring""" a_ , a_ = grid.shape a_ = [-1, 1, 0, 0] a_ = [0, 0, -1, 1] if allow_diagonal: dx += [-1, -1, 1, 1] dy += [-1, 1, -1, 1] a_ , a_ = [(0, source)], set() a_ = np.full((rows, cols) , np.inf ) a_ = 0 a_ = np.empty((rows, cols) , dtype=UpperCAmelCase ) a_ = None while queue: ((a_) , (a_)) = heappop(UpperCAmelCase ) if (x, y) in visited: continue visited.add((x, y) ) if (x, y) == destination: a_ = [] while (x, y) != source: path.append((x, y) ) a_ , a_ = predecessors[x, y] path.append(UpperCAmelCase ) # add the source manually path.reverse() return matrix[destination], path for i in range(len(UpperCAmelCase ) ): a_ , a_ = x + dx[i], y + dy[i] if 0 <= nx < rows and 0 <= ny < cols: a_ = grid[nx][ny] if next_node == 1 and matrix[nx, ny] > dist + 1: heappush(UpperCAmelCase , (dist + 1, (nx, ny)) ) a_ = dist + 1 a_ = (x, y) return np.inf, [] if __name__ == "__main__": import doctest doctest.testmod()
303
1
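Because every step in the grid above costs exactly 1, the heap-ordered search behaves like breadth-first search; Dijkstra's generality only matters for non-uniform edge weights. The relaxation loop on a hand-written 2x3 grid, as a sketch independent of the function above (1 = free cell, 0 = blocked):

import heapq

import numpy as np

grid = np.array([[1, 1, 1], [1, 0, 1]])
dist = np.full(grid.shape, np.inf)
dist[0, 0] = 0
queue = [(0, (0, 0))]
while queue:
    d, (x, y) = heapq.heappop(queue)
    for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
        nx, ny = x + dx, y + dy
        # relax the neighbor if it is in bounds, free, and reached more cheaply
        if 0 <= nx < 2 and 0 <= ny < 3 and grid[nx, ny] and d + 1 < dist[nx, ny]:
            dist[nx, ny] = d + 1
            heapq.heappush(queue, (d + 1, (nx, ny)))
print(dist[1, 2])  # 3.0: the path has to go around the blocked cell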
"""simple docstring""" import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class snake_case ( nn.Module ): def __init__( self) ->int: super().__init__() a_ = nn.Linear(3 , 4) a_ = nn.BatchNormad(4) a_ = nn.Linear(4 , 5) def UpperCAmelCase__ ( self , __UpperCAmelCase) ->str: return self.lineara(self.batchnorm(self.lineara(__UpperCAmelCase))) class snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self) ->List[str]: a_ = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(__UpperCAmelCase , model.state_dict()) a_ = os.path.join(__UpperCAmelCase , "index.json") self.assertTrue(os.path.isfile(__UpperCAmelCase)) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: a_ = os.path.join(__UpperCAmelCase , F'''{key}.dat''') self.assertTrue(os.path.isfile(__UpperCAmelCase)) # TODO: add tests on the fact weights are properly loaded def UpperCAmelCase__ ( self) ->Union[str, Any]: a_ = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: a_ = torch.randn(2 , 3 , dtype=__UpperCAmelCase) with TemporaryDirectory() as tmp_dir: a_ = offload_weight(__UpperCAmelCase , "weight" , __UpperCAmelCase , {}) a_ = os.path.join(__UpperCAmelCase , "weight.dat") self.assertTrue(os.path.isfile(__UpperCAmelCase)) self.assertDictEqual(__UpperCAmelCase , {"weight": {"shape": [2, 3], "dtype": str(__UpperCAmelCase).split(".")[1]}}) a_ = load_offloaded_weight(__UpperCAmelCase , index["weight"]) self.assertTrue(torch.equal(__UpperCAmelCase , __UpperCAmelCase)) def UpperCAmelCase__ ( self) ->int: a_ = ModelForTest() a_ = model.state_dict() a_ = {k: v for k, v in state_dict.items() if "linear2" not in k} a_ = {k: v for k, v in state_dict.items() if "linear2" in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(__UpperCAmelCase , __UpperCAmelCase) a_ = OffloadedWeightsLoader(state_dict=__UpperCAmelCase , save_folder=__UpperCAmelCase) # Every key is there with the right value self.assertEqual(sorted(__UpperCAmelCase) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(__UpperCAmelCase , weight_map[key])) a_ = {k: v for k, v in state_dict.items() if "weight" in k} a_ = {k: v for k, v in state_dict.items() if "weight" not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(__UpperCAmelCase , __UpperCAmelCase) a_ = OffloadedWeightsLoader(state_dict=__UpperCAmelCase , save_folder=__UpperCAmelCase) # Every key is there with the right value self.assertEqual(sorted(__UpperCAmelCase) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(__UpperCAmelCase , weight_map[key])) with TemporaryDirectory() as tmp_dir: offload_state_dict(__UpperCAmelCase , __UpperCAmelCase) # Duplicates are removed a_ = OffloadedWeightsLoader(state_dict=__UpperCAmelCase , save_folder=__UpperCAmelCase) # Every key is there with the right value self.assertEqual(sorted(__UpperCAmelCase) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(__UpperCAmelCase , weight_map[key])) def UpperCAmelCase__ ( self) ->List[str]: a_ = {"a.1": 0, "a.10": 1, "a.2": 2} a_ = extract_submodules_state_dict(__UpperCAmelCase , ["a.1", "a.2"]) self.assertDictEqual(__UpperCAmelCase , {"a.1": 0, "a.2": 2}) a_ = {"a.1.a": 0, "a.10.a": 
1, "a.2.a": 2} a_ = extract_submodules_state_dict(__UpperCAmelCase , ["a.1", "a.2"]) self.assertDictEqual(__UpperCAmelCase , {"a.1.a": 0, "a.2.a": 2})
303
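The .dat files these tests assert on are raw numpy memmaps; the index JSON records just enough metadata (shape and dtype) to map each one back. A simplified sketch of that round trip (a stand-in for offload_weight / load_offloaded_weight, not their actual implementation, writing to a temporary path):

import os
import tempfile

import numpy as np
import torch

weight = torch.randn(2, 3)
array = weight.numpy()
path = os.path.join(tempfile.mkdtemp(), "weight.dat")

memmap = np.memmap(path, dtype=array.dtype, mode="w+", shape=array.shape)
memmap[:] = array
memmap.flush()

index = {"weight": {"shape": [2, 3], "dtype": "float32"}}  # what index.json stores
entry = index["weight"]
loaded = np.memmap(path, dtype=entry["dtype"], mode="r", shape=tuple(entry["shape"]))
assert torch.equal(weight, torch.from_numpy(np.array(loaded)))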
"""simple docstring""" import numpy as np import torch from imwatermark import WatermarkEncoder # Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66 UpperCamelCase_ = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10 # bin(x)[2:] gives bits of x as str, use int to convert them to 0/1 UpperCamelCase_ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]] class snake_case : def __init__( self) ->Optional[int]: a_ = WATERMARK_BITS a_ = WatermarkEncoder() self.encoder.set_watermark("bits" , self.watermark) def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[int]: # can't encode images that are smaller than 256 if images.shape[-1] < 2_56: return images a_ = (2_55 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1).float().numpy() a_ = [self.encoder.encode(__UpperCAmelCase , "dwtDct") for image in images] a_ = torch.from_numpy(np.array(__UpperCAmelCase)).permute(0 , 3 , 1 , 2) a_ = torch.clamp(2 * (images / 2_55 - 0.5) , min=-1.0 , max=1.0) return images
"""simple docstring""" import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated UpperCamelCase_ = collections.namedtuple('_Datasets', ['train', 'validation', 'test']) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ UpperCamelCase_ = 'https://storage.googleapis.com/cvdf-datasets/mnist/' def UpperCamelCase ( UpperCAmelCase ) ->int: """simple docstring""" a_ = numpy.dtype(numpy.uintaa ).newbyteorder(">" ) return numpy.frombuffer(bytestream.read(4 ) , dtype=UpperCAmelCase )[0] @deprecated(UpperCAmelCase , "Please use tf.data to implement this functionality." ) def UpperCamelCase ( UpperCAmelCase ) ->str: """simple docstring""" print("Extracting" , f.name ) with gzip.GzipFile(fileobj=UpperCAmelCase ) as bytestream: a_ = _readaa(UpperCAmelCase ) if magic != 2_051: raise ValueError( "Invalid magic number %d in MNIST image file: %s" % (magic, f.name) ) a_ = _readaa(UpperCAmelCase ) a_ = _readaa(UpperCAmelCase ) a_ = _readaa(UpperCAmelCase ) a_ = bytestream.read(rows * cols * num_images ) a_ = numpy.frombuffer(UpperCAmelCase , dtype=numpy.uinta ) a_ = data.reshape(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , 1 ) return data @deprecated(UpperCAmelCase , "Please use tf.one_hot on tensors." ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Any: """simple docstring""" a_ = labels_dense.shape[0] a_ = numpy.arange(UpperCAmelCase ) * num_classes a_ = numpy.zeros((num_labels, num_classes) ) a_ = 1 return labels_one_hot @deprecated(UpperCAmelCase , "Please use tf.data to implement this functionality." ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=10 ) ->str: """simple docstring""" print("Extracting" , f.name ) with gzip.GzipFile(fileobj=UpperCAmelCase ) as bytestream: a_ = _readaa(UpperCAmelCase ) if magic != 2_049: raise ValueError( "Invalid magic number %d in MNIST label file: %s" % (magic, f.name) ) a_ = _readaa(UpperCAmelCase ) a_ = bytestream.read(UpperCAmelCase ) a_ = numpy.frombuffer(UpperCAmelCase , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(UpperCAmelCase , UpperCAmelCase ) return labels class snake_case : @deprecated( __UpperCAmelCase , "Please use alternatives such as official/mnist/_DataSet.py" " from tensorflow/models." , ) def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=dtypes.floataa , __UpperCAmelCase=True , __UpperCAmelCase=None , ) ->List[str]: a_ , a_ = random_seed.get_seed(__UpperCAmelCase) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda) a_ = dtypes.as_dtype(__UpperCAmelCase).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype) if fake_data: a_ = 1_00_00 a_ = one_hot else: assert ( images.shape[0] == labels.shape[0] ), F'''images.shape: {images.shape} labels.shape: {labels.shape}''' a_ = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 a_ = images.reshape( images.shape[0] , images.shape[1] * images.shape[2]) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. 
a_ = images.astype(numpy.floataa) a_ = numpy.multiply(__UpperCAmelCase , 1.0 / 255.0) a_ = images a_ = labels a_ = 0 a_ = 0 @property def UpperCAmelCase__ ( self) ->List[str]: return self._images @property def UpperCAmelCase__ ( self) ->Any: return self._labels @property def UpperCAmelCase__ ( self) ->Tuple: return self._num_examples @property def UpperCAmelCase__ ( self) ->int: return self._epochs_completed def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True) ->str: if fake_data: a_ = [1] * 7_84 a_ = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(__UpperCAmelCase)], [fake_label for _ in range(__UpperCAmelCase)], ) a_ = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: a_ = numpy.arange(self._num_examples) numpy.random.shuffle(__UpperCAmelCase) a_ = self.images[perma] a_ = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch a_ = self._num_examples - start a_ = self._images[start : self._num_examples] a_ = self._labels[start : self._num_examples] # Shuffle the data if shuffle: a_ = numpy.arange(self._num_examples) numpy.random.shuffle(__UpperCAmelCase) a_ = self.images[perm] a_ = self.labels[perm] # Start next epoch a_ = 0 a_ = batch_size - rest_num_examples a_ = self._index_in_epoch a_ = self._images[start:end] a_ = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0), ) else: self._index_in_epoch += batch_size a_ = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(UpperCAmelCase , "Please write your own downloading logic." ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Optional[Any]: """simple docstring""" if not gfile.Exists(UpperCAmelCase ): gfile.MakeDirs(UpperCAmelCase ) a_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) if not gfile.Exists(UpperCAmelCase ): urllib.request.urlretrieve(UpperCAmelCase , UpperCAmelCase ) # noqa: S310 with gfile.GFile(UpperCAmelCase ) as f: a_ = f.size() print("Successfully downloaded" , UpperCAmelCase , UpperCAmelCase , "bytes." 
) return filepath @deprecated( UpperCAmelCase , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=dtypes.floataa , UpperCAmelCase=True , UpperCAmelCase=5_000 , UpperCAmelCase=None , UpperCAmelCase=DEFAULT_SOURCE_URL , ) ->Optional[Any]: """simple docstring""" if fake_data: def fake(): return _DataSet( [] , [] , fake_data=UpperCAmelCase , one_hot=UpperCAmelCase , dtype=UpperCAmelCase , seed=UpperCAmelCase ) a_ = fake() a_ = fake() a_ = fake() return _Datasets(train=UpperCAmelCase , validation=UpperCAmelCase , test=UpperCAmelCase ) if not source_url: # empty string check a_ = DEFAULT_SOURCE_URL a_ = "train-images-idx3-ubyte.gz" a_ = "train-labels-idx1-ubyte.gz" a_ = "t10k-images-idx3-ubyte.gz" a_ = "t10k-labels-idx1-ubyte.gz" a_ = _maybe_download( UpperCAmelCase , UpperCAmelCase , source_url + train_images_file ) with gfile.Open(UpperCAmelCase , "rb" ) as f: a_ = _extract_images(UpperCAmelCase ) a_ = _maybe_download( UpperCAmelCase , UpperCAmelCase , source_url + train_labels_file ) with gfile.Open(UpperCAmelCase , "rb" ) as f: a_ = _extract_labels(UpperCAmelCase , one_hot=UpperCAmelCase ) a_ = _maybe_download( UpperCAmelCase , UpperCAmelCase , source_url + test_images_file ) with gfile.Open(UpperCAmelCase , "rb" ) as f: a_ = _extract_images(UpperCAmelCase ) a_ = _maybe_download( UpperCAmelCase , UpperCAmelCase , source_url + test_labels_file ) with gfile.Open(UpperCAmelCase , "rb" ) as f: a_ = _extract_labels(UpperCAmelCase , one_hot=UpperCAmelCase ) if not 0 <= validation_size <= len(UpperCAmelCase ): a_ = ( "Validation size should be between 0 and " F'''{len(UpperCAmelCase )}. Received: {validation_size}.''' ) raise ValueError(UpperCAmelCase ) a_ = train_images[:validation_size] a_ = train_labels[:validation_size] a_ = train_images[validation_size:] a_ = train_labels[validation_size:] a_ = {"dtype": dtype, "reshape": reshape, "seed": seed} a_ = _DataSet(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) a_ = _DataSet(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) a_ = _DataSet(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) return _Datasets(train=UpperCAmelCase , validation=UpperCAmelCase , test=UpperCAmelCase )
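# Hypothetical usage sketch for the loader defined above. The identifiers in
# this file were mangled; read_data_sets is the name of the original
# tensorflow.examples.tutorials.mnist entry point this code mirrors.
datasets = read_data_sets("/tmp/mnist_data", one_hot=True, validation_size=5_000)
batch_images, batch_labels = datasets.train.next_batch(32)
print(batch_images.shape, batch_labels.shape)  # (32, 784) (32, 10)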
"""simple docstring""" import math UpperCamelCase_ = 10 UpperCamelCase_ = 7 UpperCamelCase_ = BALLS_PER_COLOUR * NUM_COLOURS def UpperCamelCase ( UpperCAmelCase = 20 ) ->str: """simple docstring""" a_ = math.comb(UpperCAmelCase , UpperCAmelCase ) a_ = math.comb(NUM_BALLS - BALLS_PER_COLOUR , UpperCAmelCase ) a_ = NUM_COLOURS * (1 - missing_colour / total) return F'''{result:.9f}''' if __name__ == "__main__": print(solution(20))
"""simple docstring""" import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class snake_case ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : int = LayoutLMTokenizer a_ : List[str] = LayoutLMTokenizerFast a_ : Any = True a_ : List[str] = True def UpperCAmelCase__ ( self) ->Dict: super().setUp() a_ = [ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def UpperCAmelCase__ ( self , **__UpperCAmelCase) ->List[str]: return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase) def UpperCAmelCase__ ( self , __UpperCAmelCase) ->str: a_ = "UNwant\u00E9d,running" a_ = "unwanted, running" return input_text, output_text def UpperCAmelCase__ ( self) ->List[str]: a_ = self.tokenizer_class(self.vocab_file) a_ = tokenizer.tokenize("UNwant\u00E9d,running") self.assertListEqual(__UpperCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [7, 4, 5, 10, 8, 9]) def UpperCAmelCase__ ( self) ->Optional[Any]: pass
"""simple docstring""" import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params UpperCamelCase_ = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ['memory_attention', 'encoder_attn'], ['attention', 'attn'], ['/', '.'], ['.LayerNorm.gamma', '_layer_norm.weight'], ['.LayerNorm.beta', '_layer_norm.bias'], ['r.layer_', 'r.layers.'], ['output_proj', 'out_proj'], ['ffn.dense_1.', 'fc2.'], ['ffn.dense.', 'fc1.'], ['ffn_layer_norm', 'final_layer_norm'], ['kernel', 'weight'], ['encoder_layer_norm.', 'encoder.layer_norm.'], ['decoder_layer_norm.', 'decoder.layer_norm.'], ['embeddings.weights', 'shared.weight'], ] def UpperCamelCase ( UpperCAmelCase ) ->Optional[Any]: """simple docstring""" for pegasus_name, hf_name in PATTERNS: a_ = k.replace(UpperCAmelCase , UpperCAmelCase ) return k def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->PegasusForConditionalGeneration: """simple docstring""" a_ = DEFAULTS.copy() cfg_kwargs.update(UpperCAmelCase ) a_ = PegasusConfig(**UpperCAmelCase ) a_ = PegasusForConditionalGeneration(UpperCAmelCase ) a_ = torch_model.model.state_dict() a_ = {} for k, v in tf_weights.items(): a_ = rename_state_dict_key(UpperCAmelCase ) if new_k not in sd: raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' ) if "dense" in k or "proj" in new_k: a_ = v.T a_ = torch.tensor(UpperCAmelCase , dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}''' # make sure embedding.padding_idx is respected a_ = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1] ) a_ = mapping["shared.weight"] a_ = mapping["shared.weight"] a_ = {k: torch.zeros_like(UpperCAmelCase ) for k, v in sd.items() if k.endswith("bias" ) and k not in mapping} mapping.update(**UpperCAmelCase ) a_ , a_ = torch_model.model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase ) a_ = [ k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"] ] assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}''' assert extra == [], F'''no matches found for the following tf keys {extra}''' return torch_model def UpperCamelCase ( UpperCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) ->Dict: """simple docstring""" a_ = tf.train.list_variables(UpperCAmelCase ) a_ = {} a_ = ["Adafactor", "global_step"] for name, shape in tqdm(UpperCAmelCase , desc="converting tf checkpoint to dict" ): a_ = any(pat in name for pat in ignore_name ) if skip_key: continue a_ = tf.train.load_variable(UpperCAmelCase , UpperCAmelCase ) a_ = array return tf_weights def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" a_ = Path(UpperCAmelCase ).parent.name a_ = task_specific_params[F'''summarization_{dataset}''']["max_position_embeddings"] a_ = PegasusTokenizer.from_pretrained("sshleifer/pegasus" , model_max_length=UpperCAmelCase ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(UpperCAmelCase ) # convert model a_ = get_tf_weights_as_numpy(UpperCAmelCase ) a_ = task_specific_params[F'''summarization_{dataset}'''] if dataset == "large": a_ = task_specific_params a_ = convert_pegasus(UpperCAmelCase 
, UpperCAmelCase ) torch_model.save_pretrained(UpperCAmelCase ) a_ = torch_model.state_dict() sd.pop("model.decoder.embed_positions.weight" ) sd.pop("model.encoder.embed_positions.weight" ) torch.save(UpperCAmelCase , Path(UpperCAmelCase ) / "pytorch_model.bin" ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.') UpperCamelCase_ = parser.parse_args() if args.save_dir is None: UpperCamelCase_ = Path(args.tf_ckpt_path).parent.name UpperCamelCase_ = os.path.join('pegasus', dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
"""simple docstring""" def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int: """simple docstring""" def count_of_possible_combinations(UpperCAmelCase ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(UpperCAmelCase ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int: """simple docstring""" def count_of_possible_combinations_with_dp_array( UpperCAmelCase , UpperCAmelCase ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] a_ = sum( count_of_possible_combinations_with_dp_array(target - item , UpperCAmelCase ) for item in array ) a_ = answer return answer a_ = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(UpperCAmelCase , UpperCAmelCase ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int: """simple docstring""" a_ = [0] * (target + 1) a_ = 1 for i in range(1 , target + 1 ): for j in range(UpperCAmelCase ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() UpperCamelCase_ = 3 UpperCamelCase_ = 5 UpperCamelCase_ = [1, 2, 5] print(combination_sum_iv(n, array, target))
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class snake_case : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=50 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=None , ) ->Dict: a_ = parent a_ = batch_size a_ = seq_length a_ = is_training a_ = use_input_mask a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = initializer_range a_ = use_labels a_ = scope def UpperCAmelCase__ ( self) ->Any: a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a_ = None if self.use_input_mask: a_ = random_attention_mask([self.batch_size, self.seq_length]) if self.use_labels: a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a_ = self.get_config() return config, input_ids, input_mask, token_labels def UpperCAmelCase__ ( self) ->Optional[Any]: return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self) ->List[str]: ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) = self.prepare_config_and_inputs() a_ = True a_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->str: a_ = BertGenerationEncoder(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase) a_ = model(__UpperCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->Union[str, Any]: a_ = True a_ = BertGenerationEncoder(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , 
encoder_hidden_states=__UpperCAmelCase , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->List[str]: a_ = True a_ = True a_ = BertGenerationDecoder(config=__UpperCAmelCase).to(__UpperCAmelCase).eval() # first forward pass a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , ) a_ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids a_ = ids_tensor((self.batch_size, 3) , config.vocab_size) a_ = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and a_ = torch.cat([input_ids, next_tokens] , dim=-1) a_ = torch.cat([input_mask, next_mask] , dim=-1) a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0] a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0] # select random slice a_ = ids_tensor((1,) , output_from_past.shape[-1]).item() a_ = output_from_no_past[:, -3:, random_slice_idx].detach() a_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , ) ->Tuple: a_ = BertGenerationDecoder(__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCAmelCase__ ( self) ->str: a_ , a_ , a_ , a_ = self.prepare_config_and_inputs() a_ = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : List[str] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () a_ : Optional[int] = (BertGenerationDecoder,) if is_torch_available() else () a_ : List[Any] = ( {"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder} if is_torch_available() else {} ) def UpperCAmelCase__ ( self) ->List[Any]: a_ = BertGenerationEncoderTester(self) a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37) def UpperCAmelCase__ ( self) ->Optional[Any]: self.config_tester.run_common_tests() def UpperCAmelCase__ ( self) ->Tuple: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Tuple: a_ , a_ , a_ , a_ = self.model_tester.prepare_config_and_inputs() a_ = "bert" self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) def UpperCAmelCase__ ( self) ->int: a_ = 
self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->List[str]: a_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Optional[int]: # This regression test was failing with PyTorch < 1.3 ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() a_ = None self.model_tester.create_and_check_model_as_decoder( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) def UpperCAmelCase__ ( self) ->List[Any]: a_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase) @slow def UpperCAmelCase__ ( self) ->str: a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") self.assertIsNotNone(__UpperCAmelCase) @require_torch class snake_case ( unittest.TestCase ): @slow def UpperCAmelCase__ ( self) ->int: a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]]) with torch.no_grad(): a_ = model(__UpperCAmelCase)[0] a_ = torch.Size([1, 8, 10_24]) self.assertEqual(output.shape , __UpperCAmelCase) a_ = torch.tensor( [[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4)) @require_torch class snake_case ( unittest.TestCase ): @slow def UpperCAmelCase__ ( self) ->List[str]: a_ = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]]) with torch.no_grad(): a_ = model(__UpperCAmelCase)[0] a_ = torch.Size([1, 8, 5_03_58]) self.assertEqual(output.shape , __UpperCAmelCase) a_ = torch.tensor( [[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
"""simple docstring""" import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( "split_dict" , [ SplitDict(), SplitDict({"train": SplitInfo(name="train" , num_bytes=1_337 , num_examples=42 , dataset_name="my_dataset" )} ), SplitDict({"train": SplitInfo(name="train" , num_bytes=1_337 , num_examples=42 )} ), SplitDict({"train": SplitInfo()} ), ] , ) def UpperCamelCase ( UpperCAmelCase ) ->List[str]: """simple docstring""" a_ = split_dict._to_yaml_list() assert len(UpperCAmelCase ) == len(UpperCAmelCase ) a_ = SplitDict._from_yaml_list(UpperCAmelCase ) for split_name, split_info in split_dict.items(): # dataset_name field is deprecated, and is therefore not part of the YAML dump a_ = None # the split name of split_dict takes over the name of the split info object a_ = split_name assert split_dict == reloaded @pytest.mark.parametrize( "split_info" , [SplitInfo(), SplitInfo(dataset_name=UpperCAmelCase ), SplitInfo(dataset_name="my_dataset" )] ) def UpperCamelCase ( UpperCAmelCase ) ->Any: """simple docstring""" a_ = asdict(SplitDict({"train": split_info} ) ) assert "dataset_name" in split_dict_asdict["train"] assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger(__name__) def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" if "resnet-50" in model_name: a_ = ResNetConfig.from_pretrained("microsoft/resnet-50" ) elif "resnet-101" in model_name: a_ = ResNetConfig.from_pretrained("microsoft/resnet-101" ) else: raise ValueError("Model name should include either resnet50 or resnet101" ) a_ = DetrConfig(use_timm_backbone=UpperCAmelCase , backbone_config=UpperCAmelCase ) # set label attributes a_ = "panoptic" in model_name if is_panoptic: a_ = 250 else: a_ = 91 a_ = "huggingface/label-files" a_ = "coco-detection-id2label.json" a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) ) a_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()} a_ = idalabel a_ = {v: k for k, v in idalabel.items()} return config, is_panoptic def UpperCamelCase ( UpperCAmelCase ) ->List[str]: """simple docstring""" a_ = [] # stem # fmt: off rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") ) rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") ) rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") ) rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") ) rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") ) # stages for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): # shortcut if layer_idx == 0: rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''', ) ) # 3 convs for i in range(3 ): rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''', 
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''', ) ) rename_keys.append( ( F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''', F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''', ) ) # fmt: on for i in range(config.encoder_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''', ) ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') ) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''', ) ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append( 
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ] ) return rename_keys def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" a_ = state_dict.pop(UpperCAmelCase ) a_ = val def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False ) ->Optional[Any]: """simple docstring""" a_ = "" if is_panoptic: a_ = "detr." # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict a_ = in_proj_weight[:256, :] a_ = in_proj_bias[:256] a_ = in_proj_weight[256:512, :] a_ = in_proj_bias[256:512] a_ = in_proj_weight[-256:, :] a_ = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' ) a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict a_ = in_proj_weight[:256, :] a_ = in_proj_bias[:256] a_ = in_proj_weight[256:512, :] a_ = in_proj_bias[256:512] a_ = in_proj_weight[-256:, :] a_ = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention a_ = state_dict.pop( F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' ) a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) of cross-attention to the state dict a_ = in_proj_weight_cross_attn[:256, :] a_ = in_proj_bias_cross_attn[:256] a_ = in_proj_weight_cross_attn[256:512, :] a_ = in_proj_bias_cross_attn[256:512] a_ = in_proj_weight_cross_attn[-256:, :] a_ = in_proj_bias_cross_attn[-256:] def UpperCamelCase ( ) ->Dict: """simple docstring""" a_ = 
"http://images.cocodataset.org/val2017/000000039769.jpg" a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) return im @torch.no_grad() def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=False ) ->List[str]: """simple docstring""" a_ , a_ = get_detr_config(UpperCAmelCase ) # load original model from torch hub a_ = { "detr-resnet-50": "detr_resnet50", "detr-resnet-101": "detr_resnet101", } logger.info(F'''Converting model {model_name}...''' ) a_ = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=UpperCAmelCase ).eval() a_ = detr.state_dict() # rename keys for src, dest in create_rename_keys(UpperCAmelCase ): if is_panoptic: a_ = "detr." + src rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # query, key and value matrices need special treatment read_in_q_k_v(UpperCAmelCase , is_panoptic=UpperCAmelCase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them a_ = "detr.model." if is_panoptic else "model." for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): a_ = state_dict.pop(UpperCAmelCase ) a_ = val elif "class_labels_classifier" in key or "bbox_predictor" in key: a_ = state_dict.pop(UpperCAmelCase ) a_ = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: a_ = state_dict.pop(UpperCAmelCase ) a_ = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): a_ = state_dict.pop(UpperCAmelCase ) a_ = val # finally, create HuggingFace model and load state dict a_ = DetrForSegmentation(UpperCAmelCase ) if is_panoptic else DetrForObjectDetection(UpperCAmelCase ) model.load_state_dict(UpperCAmelCase ) model.eval() # verify our conversion on an image a_ = "coco_panoptic" if is_panoptic else "coco_detection" a_ = DetrImageProcessor(format=UpperCAmelCase ) a_ = processor(images=prepare_img() , return_tensors="pt" ) a_ = encoding["pixel_values"] a_ = detr(UpperCAmelCase ) a_ = model(UpperCAmelCase ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase ) model.save_pretrained(UpperCAmelCase ) processor.save_pretrained(UpperCAmelCase ) if push_to_hub: # Upload model and image processor to the hub logger.info("Uploading PyTorch model and image processor to the hub..." ) model.push_to_hub(F'''nielsr/{model_name}''' ) processor.push_to_hub(F'''nielsr/{model_name}''' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument( '--model_name', default='detr-resnet-50', type=str, choices=['detr-resnet-50', 'detr-resnet-101'], help='Name of the DETR model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' 
) parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.') UpperCamelCase_ = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
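# A standalone sketch of the q/k/v split performed by the conversion code
# above: torch.nn.MultiheadAttention stores a fused in_proj of shape (3*d, d)
# whose thirds map onto the separate query/key/value projections of the HF
# model (d = 256 matches DETR's hidden size; the random weights are
# illustrative).
import torch

d = 256
in_proj_weight = torch.randn(3 * d, d)
in_proj_bias = torch.randn(3 * d)

q_w, k_w, v_w = in_proj_weight[:d], in_proj_weight[d : 2 * d], in_proj_weight[-d:]
q_b, k_b, v_b = in_proj_bias[:d], in_proj_bias[d : 2 * d], in_proj_bias[-d:]
assert q_w.shape == k_w.shape == v_w.shape == (d, d)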
"""simple docstring""" import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class snake_case : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=64 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) ->List[Any]: a_ = parent a_ = batch_size a_ = seq_length a_ = is_training a_ = use_input_mask a_ = use_token_type_ids a_ = use_labels a_ = vocab_size a_ = hidden_size a_ = embedding_size a_ = num_hidden_layers a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = type_vocab_size a_ = type_sequence_label_size a_ = initializer_range a_ = num_labels a_ = num_choices a_ = scope def UpperCAmelCase__ ( self) ->Optional[int]: a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a_ = None if self.use_input_mask: a_ = random_attention_mask([self.batch_size, self.seq_length]) a_ = None if self.use_token_type_ids: a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) a_ = None a_ = None a_ = None if self.use_labels: a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) a_ = ids_tensor([self.batch_size] , self.num_choices) a_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self) ->str: return MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Any: a_ = MobileBertModel(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase) a_ = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase) a_ = 
model(__UpperCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Union[str, Any]: a_ = MobileBertForMaskedLM(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[int]: a_ = MobileBertForNextSentencePrediction(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int: a_ = MobileBertForPreTraining(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , next_sentence_label=__UpperCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str: a_ = MobileBertForQuestionAnswering(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Union[str, Any]: a_ = self.num_labels a_ = MobileBertForSequenceClassification(__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Any: a_ = self.num_labels a_ = MobileBertForTokenClassification(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , 
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->List[Any]: a_ = self.num_choices a_ = MobileBertForMultipleChoice(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() a_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() a_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() a_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def UpperCAmelCase__ ( self) ->Any: a_ = self.prepare_config_and_inputs() ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) = config_and_inputs a_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : Dict = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) a_ : Dict = ( { """feature-extraction""": MobileBertModel, """fill-mask""": MobileBertForMaskedLM, """question-answering""": MobileBertForQuestionAnswering, """text-classification""": MobileBertForSequenceClassification, """token-classification""": MobileBertForTokenClassification, """zero-shot""": MobileBertForSequenceClassification, } if is_torch_available() else {} ) a_ : List[Any] = True def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False) ->Union[str, Any]: a_ = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase) if return_labels: if model_class in get_values(__UpperCAmelCase): a_ = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__UpperCAmelCase) a_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase) return inputs_dict def UpperCAmelCase__ ( self) ->List[str]: a_ = MobileBertModelTester(self) a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37) def UpperCAmelCase__ ( self) ->List[Any]: self.config_tester.run_common_tests() def UpperCAmelCase__ ( self) ->str: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Union[str, Any]: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->List[str]: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->List[str]: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Union[str, Any]: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Optional[Any]: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*__UpperCAmelCase) def UpperCAmelCase__ ( 
self) ->List[str]: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Any: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*__UpperCAmelCase) def UpperCamelCase ( UpperCAmelCase ) ->Tuple: """simple docstring""" return torch.tensor( UpperCAmelCase , dtype=torch.long , device=UpperCAmelCase , ) UpperCamelCase_ = 1E-3 @require_torch @require_sentencepiece @require_tokenizers class snake_case ( unittest.TestCase ): @slow def UpperCAmelCase__ ( self) ->int: a_ = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(__UpperCAmelCase) a_ = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]]) with torch.no_grad(): a_ = model(__UpperCAmelCase)[0] a_ = torch.Size((1, 9, 5_12)) self.assertEqual(output.shape , __UpperCAmelCase) a_ = torch.tensor( [ [ [-2.473_6526E07, 8.269_1656E04, 1.652_1838E05], [-5.754_1704E-01, 3.905_6022E00, 4.401_1507E00], [2.604_7359E00, 1.567_7652E00, -1.732_4188E-01], ] ] , device=__UpperCAmelCase , ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE a_ = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE) a_ = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE) self.assertTrue(lower_bound and upper_bound)
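# A tiny sketch of the relative-bound check motivated in the comment above:
# MobileBERT activations span roughly 1e0 to 1e8, so an absolute tolerance is
# meaningless and the test bounds the ratio instead (values illustrative).
import torch

TOLERANCE = 1e-3
expected = torch.tensor([1.0e8, -5.0, 2.6])
actual = expected * (1 + 5e-4)  # simulate a small *relative* error

ratio = expected / actual
assert bool(torch.all(ratio >= 1 - TOLERANCE) and torch.all(ratio <= 1 + TOLERANCE))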
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def UpperCamelCase ( UpperCAmelCase ) ->Tuple: """simple docstring""" a_ = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2] a_ = True if "large" in model_name or "huge" in model_name else False a_ = True if "large" in model_name or "huge" in model_name else False a_ = True if "large" in model_name or "huge" in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: a_ = [3, 3, 3, 3] a_ = [5, 5, 5, 5] elif "fl4" in model_name: a_ = [4, 4, 4, 4] a_ = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: a_ = [3, 3, 3, 3] if "lrf" in model_name: a_ = [3, 3, 3, 3] else: a_ = [2, 2, 2, 2] if "tiny" in model_name: a_ = 96 elif "small" in model_name: a_ = 96 elif "base" in model_name: a_ = 128 elif "large" in model_name: a_ = 192 elif "xlarge" in model_name: a_ = 256 elif "huge" in model_name: a_ = 352 # set label information a_ = "huggingface/label-files" if "large" in model_name or "huge" in model_name: a_ = "imagenet-22k-id2label.json" else: a_ = "imagenet-1k-id2label.json" a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) ) a_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()} a_ = {v: k for k, v in idalabel.items()} a_ = FocalNetConfig( embed_dim=UpperCAmelCase , depths=UpperCAmelCase , focal_levels=UpperCAmelCase , focal_windows=UpperCAmelCase , use_conv_embed=UpperCAmelCase , idalabel=UpperCAmelCase , labelaid=UpperCAmelCase , use_post_layernorm=UpperCAmelCase , use_layerscale=UpperCAmelCase , ) return config def UpperCamelCase ( UpperCAmelCase ) ->Any: """simple docstring""" if "patch_embed.proj" in name: a_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: a_ = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: a_ = "encoder." + name if "encoder.layers" in name: a_ = name.replace("encoder.layers" , "encoder.stages" ) if "downsample.proj" in name: a_ = name.replace("downsample.proj" , "downsample.projection" ) if "blocks" in name: a_ = name.replace("blocks" , "layers" ) if "modulation.f.weight" in name or "modulation.f.bias" in name: a_ = name.replace("modulation.f" , "modulation.projection_in" ) if "modulation.h.weight" in name or "modulation.h.bias" in name: a_ = name.replace("modulation.h" , "modulation.projection_context" ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: a_ = name.replace("modulation.proj" , "modulation.projection_out" ) if name == "norm.weight": a_ = "layernorm.weight" if name == "norm.bias": a_ = "layernorm.bias" if "head" in name: a_ = name.replace("head" , "classifier" ) else: a_ = "focalnet." 
+ name return name def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) ->Dict: """simple docstring""" a_ = { "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth", "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth", "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth", "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth", "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth", "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth", "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth", "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth", "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth", "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth", } # fmt: on a_ = model_name_to_url[model_name] print("Checkpoint URL: " , UpperCAmelCase ) a_ = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location="cpu" )["model"] # rename keys for key in state_dict.copy().keys(): a_ = state_dict.pop(UpperCAmelCase ) a_ = val a_ = get_focalnet_config(UpperCAmelCase ) a_ = FocalNetForImageClassification(UpperCAmelCase ) model.eval() # load state dict model.load_state_dict(UpperCAmelCase ) # verify conversion a_ = "http://images.cocodataset.org/val2017/000000039769.jpg" a_ = BitImageProcessor( do_resize=UpperCAmelCase , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase , crop_size=224 , do_normalize=UpperCAmelCase , image_mean=UpperCAmelCase , image_std=UpperCAmelCase , ) a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) a_ = processor(images=UpperCAmelCase , return_tensors="pt" ) a_ = transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) a_ = image_transforms(UpperCAmelCase ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , UpperCAmelCase , atol=1E-4 ) a_ = model(**UpperCAmelCase ) a_ = outputs.logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) print("First values of logits:" , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": a_ = torch.tensor([0.2166, -0.4368, 0.2191] ) elif model_name == "focalnet-tiny-lrf": a_ = torch.tensor([1.1669, 0.0125, -0.1695] ) elif model_name == "focalnet-small": a_ = torch.tensor([0.4917, -0.0430, 0.1341] ) elif model_name == "focalnet-small-lrf": a_ = torch.tensor([-0.2588, -0.5342, -0.2331] ) elif model_name == "focalnet-base": a_ = torch.tensor([-0.1655, -0.4090, -0.1730] ) elif model_name == "focalnet-base-lrf": a_ = torch.tensor([0.5306, -0.0483, -0.3928] ) assert torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1E-4 ) print("Looks ok!" 
) if pytorch_dump_folder_path is not None: print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCAmelCase ) processor.save_pretrained(UpperCAmelCase ) if push_to_hub: print(F'''Pushing model and processor of {model_name} to the hub...''' ) model.push_to_hub(F'''{model_name}''' ) processor.push_to_hub(F'''{model_name}''' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='focalnet-tiny', type=str, help='Name of the FocalNet model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub.', ) UpperCamelCase_ = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
303
1
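The FocalNet conversion record above follows the usual checkpoint-porting pattern: pop each key out of the original state dict, map it through a rename function, and re-insert the tensor under the new name before verifying the converted model's logits. A minimal runnable sketch of that rename-then-reinsert step follows; the `rename_key` substring rules and the toy state dict are hypothetical stand-ins for illustration, not the actual FocalNet key map.

# Sketch of the rename-then-reinsert pattern used by checkpoint conversion
# scripts. The mapping below is a hypothetical example, not the real key map.
from collections import OrderedDict

import torch


def rename_key(name):
    # Illustrative substring rewrites, in the style of the script above.
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    return name


def convert_state_dict(state_dict):
    # Pop every original key and re-insert its tensor under the renamed key.
    new_state_dict = OrderedDict()
    for key in list(state_dict.keys()):
        val = state_dict.pop(key)
        new_state_dict[rename_key(key)] = val
    return new_state_dict


# Toy usage: two fake checkpoint entries get remapped to the new layout.
old = OrderedDict(
    {
        "patch_embed.proj.weight": torch.zeros(1),
        "blocks.0.mlp.weight": torch.zeros(1),
    }
)
new = convert_state_dict(old)
assert "embeddings.patch_embeddings.projection.weight" in new
assert "layers.0.mlp.weight" in new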
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
303
"""simple docstring""" import os import numpy import onnx def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->List[str]: """simple docstring""" a_ = a.name a_ = b.name a_ = "" a_ = "" a_ = a == b a_ = name_a a_ = name_b return res def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]: """simple docstring""" for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCAmelCase , UpperCAmelCase ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase , UpperCAmelCase ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Dict: """simple docstring""" for n in graph_proto.node: _node_replace_input_with(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int: """simple docstring""" a_ = list(model.graph.initializer ) a_ = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i a_ = inits[i].name a_ = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase ) def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]: """simple docstring""" a_ = os.path.dirname(UpperCAmelCase ) a_ = os.path.basename(UpperCAmelCase ) a_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) ) a_ = list(model.graph.initializer ) a_ = set() a_ = {} a_ = [] a_ = 0 for i in range(len(UpperCAmelCase ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCAmelCase ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCAmelCase ) dup_set.add(UpperCAmelCase ) a_ = inits[j].data_type a_ = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print("unexpected data type: " , UpperCAmelCase ) total_reduced_size += mem_size a_ = inits[i].name a_ = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCAmelCase ) else: a_ = [name_j] ind_to_replace.append((j, i) ) print("total reduced size: " , total_reduced_size / 1_024 / 1_024 / 1_024 , "GB" ) a_ = sorted(UpperCAmelCase ) _remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) a_ = "optimized_" + model_file_name a_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) onnx.save(UpperCAmelCase , UpperCAmelCase ) return new_model
303
1
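The ONNX deduplication pass in the last record hinges on comparing initializer tensors while ignoring their names, then rewriting every node input that still references a removed duplicate. A small self-contained sketch of the name-insensitive comparison follows; it builds two toy initializers with onnx.numpy_helper (standard onnx API) and illustrates the technique under those assumptions, not the full optimizer.

# Sketch of name-insensitive TensorProto comparison, the core test the
# dedup pass above relies on. The tensor names "weight_1"/"weight_2" are
# made up for this example.
import numpy
from onnx import numpy_helper


def is_equal_ignoring_name(a, b):
    # Blank both names so protobuf equality compares only dtype, dims and
    # raw data, then restore the originals before returning.
    name_a, name_b = a.name, b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


# Toy usage: identical payloads under different names compare equal.
data = numpy.arange(4, dtype=numpy.float32)
t1 = numpy_helper.from_array(data, name="weight_1")
t2 = numpy_helper.from_array(data, name="weight_2")
assert is_equal_ignoring_name(t1, t2)
assert t1.name == "weight_1"  # names are restored afterwards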