Dataset schema (one row per code sample below):
  code                     string (86 to 54.5k chars)
  code_codestyle           int64  (0 to 371)
  style_context            string (87 to 49.2k chars)
  style_context_codestyle  int64  (0 to 349)
  label                    int64  (0 or 1)
import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate snake_case = trt.Logger(trt.Logger.WARNING) snake_case = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) snake_case = logging.getLogger(__name__) snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( """--onnx_model_path""", default=None, type=str, required=True, help="""Path to ONNX model: """, ) parser.add_argument( """--output_dir""", default=None, type=str, required=True, help="""The output directory where the model checkpoints and predictions will be written.""", ) # Other parameters parser.add_argument( """--tokenizer_name""", default="""""", type=str, required=True, help="""Pretrained tokenizer name or path if not the same as model_name""", ) parser.add_argument( """--version_2_with_negative""", action="""store_true""", help="""If true, the SQuAD examples contain some that do not have an answer.""", ) parser.add_argument( """--null_score_diff_threshold""", type=float, default=0.0, help="""If null_score - best_non_null is greater than the threshold predict null.""", ) parser.add_argument( """--max_seq_length""", default=384, type=int, help=( """The maximum total input sequence length after WordPiece tokenization. Sequences """ """longer than this will be truncated, and sequences shorter than this will be padded.""" ), ) parser.add_argument( """--doc_stride""", default=128, type=int, help="""When splitting up a long document into chunks, how much stride to take between chunks.""", ) parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""") parser.add_argument( """--n_best_size""", default=20, type=int, help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""", ) parser.add_argument( """--max_answer_length""", default=30, type=int, help=( """The maximum length of an answer that can be generated. 
This is needed because the start """ """and end predictions are not conditioned on one another.""" ), ) parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""") parser.add_argument( """--dataset_name""", type=str, default=None, required=True, help="""The name of the dataset to use (via the datasets library).""", ) parser.add_argument( """--dataset_config_name""", type=str, default=None, help="""The configuration name of the dataset to use (via the datasets library).""", ) parser.add_argument( """--preprocessing_num_workers""", type=int, default=4, help="""A csv or a json file containing the training data.""" ) parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""") parser.add_argument( """--fp16""", action="""store_true""", help="""Whether to use 16-bit (mixed) precision instead of 32-bit""", ) parser.add_argument( """--int8""", action="""store_true""", help="""Whether to use INT8""", ) snake_case = parser.parse_args() if args.tokenizer_name: snake_case = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( """You are instantiating a new tokenizer from scratch. This is not supported by this script.""" """You can do it from another script, save it, and load it from here, using --tokenizer_name.""" ) logger.info("""Training/evaluation parameters %s""", args) snake_case = args.per_device_eval_batch_size snake_case = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties snake_case = True snake_case = """temp_engine/bert-fp32.engine""" if args.fpaa: snake_case = """temp_engine/bert-fp16.engine""" if args.inta: snake_case = """temp_engine/bert-int8.engine""" # import ONNX file if not os.path.exists("""temp_engine"""): os.makedirs("""temp_engine""") snake_case = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, """rb""") as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network snake_case = [network.get_input(i) for i in range(network.num_inputs)] snake_case = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: snake_case = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) snake_case = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) snake_case = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, """wb""") as f: f.write(engine.serialize()) def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = np.asarray(inputs["input_ids"] , dtype=np.intaa ) SCREAMING_SNAKE_CASE : Optional[Any] = np.asarray(inputs["attention_mask"] , dtype=np.intaa ) SCREAMING_SNAKE_CASE : Any = np.asarray(inputs["token_type_ids"] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowercase ) 
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowercase ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowercase ) # start time SCREAMING_SNAKE_CASE : Optional[Any] = time.time() # Run inference context.execute_async( bindings=[int(lowercase ) for d_inp in d_inputs] + [int(lowercase ), int(lowercase )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(lowercase , lowercase , lowercase ) cuda.memcpy_dtoh_async(lowercase , lowercase , lowercase ) # Synchronize the stream and take time stream.synchronize() # end time SCREAMING_SNAKE_CASE : Any = time.time() SCREAMING_SNAKE_CASE : Tuple = end_time - start_time SCREAMING_SNAKE_CASE : List[str] = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. snake_case = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. snake_case = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError("""Evaluation requires a dataset name""") # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. snake_case = raw_datasets["""validation"""].column_names snake_case = """question""" if """question""" in column_names else column_names[0] snake_case = """context""" if """context""" in column_names else column_names[1] snake_case = """answers""" if """answers""" in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). snake_case = tokenizer.padding_side == """right""" if args.max_seq_length > tokenizer.model_max_length: logger.warning( F"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the""" F"""model ({tokenizer.model_max_length}). 
Using max_seq_length={tokenizer.model_max_length}.""" ) snake_case = min(args.max_seq_length, tokenizer.model_max_length) def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : str = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="only_second" if pad_on_right else "only_first" , max_length=lowercase , stride=args.doc_stride , return_overflowing_tokens=lowercase , return_offsets_mapping=lowercase , padding="max_length" , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. SCREAMING_SNAKE_CASE : List[Any] = tokenized_examples.pop("overflow_to_sample_mapping" ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. SCREAMING_SNAKE_CASE : Any = [] for i in range(len(tokenized_examples["input_ids"] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). SCREAMING_SNAKE_CASE : List[str] = tokenized_examples.sequence_ids(lowercase ) SCREAMING_SNAKE_CASE : str = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. SCREAMING_SNAKE_CASE : List[Any] = sample_mapping[i] tokenized_examples["example_id"].append(examples["id"][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. SCREAMING_SNAKE_CASE : List[str] = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["offset_mapping"][i] ) ] return tokenized_examples snake_case = raw_datasets["""validation"""] # Validation Feature Creation snake_case = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="""Running tokenizer on validation dataset""", ) snake_case = default_data_collator snake_case = eval_dataset.remove_columns(["""example_id""", """offset_mapping"""]) snake_case = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase="eval" ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = postprocess_qa_predictions( examples=lowercase , features=lowercase , predictions=lowercase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowercase , ) # Format the result to the format the metric expects. 
if args.version_2_with_negative: SCREAMING_SNAKE_CASE : Optional[Any] = [ {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items() ] else: SCREAMING_SNAKE_CASE : Union[str, Any] = [{"id": k, "prediction_text": v} for k, v in predictions.items()] SCREAMING_SNAKE_CASE : Dict = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=lowercase , label_ids=lowercase ) snake_case = load_metric("""squad_v2""" if args.version_2_with_negative else """squad""") # Evaluation! logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path) with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def lowerCamelCase__ ( lowercase ): """simple docstring""" return trt.volume(engine.get_binding_shape(lowercase ) ) * engine.get_binding_dtype(lowercase ).itemsize # Allocate device memory for inputs and outputs. snake_case = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer snake_case = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) snake_case = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) snake_case = cuda.mem_alloc(h_outputa.nbytes) snake_case = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. snake_case = cuda.Stream() # Evaluation logger.info("""***** Running Evaluation *****""") logger.info(F""" Num examples = {len(eval_dataset)}""") logger.info(F""" Batch size = {args.per_device_eval_batch_size}""") snake_case = 0.0 snake_case = 0 snake_case = timeit.default_timer() snake_case = None for step, batch in enumerate(eval_dataloader): snake_case , snake_case = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 snake_case , snake_case = outputs snake_case = torch.tensor(start_logits) snake_case = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered snake_case = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) snake_case = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) snake_case = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) snake_case = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: snake_case = nested_truncate(all_preds, len(eval_dataset)) snake_case = timeit.default_timer() - start_time logger.info(""" Evaluation done in total %f secs (%f sec per example)""", evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info("""Average Inference Time = {:.3f} ms""".format(total_time * 1_000 / niter)) logger.info("""Total Inference Time = {:.3f} ms""".format(total_time * 1_000)) logger.info("""Total Number of Inference = %d""", niter) snake_case = post_processing_function(eval_examples, eval_dataset, all_preds) snake_case = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(F"""Evaluation metrics: {eval_metric}""")
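The host-to-device round-trip inside model_infer above follows the standard pycuda pattern. Below is a stripped-down sketch of one inference call; hedged: execute_async with keyword bindings mirrors the call used above but is deprecated in newer TensorRT releases (execute_async_v2 drops the implicit batch), and the binding order is engine-specific.

import pycuda.autoinit  # noqa: F401 -- importing creates a CUDA context
import pycuda.driver as cuda

def infer_once(context, h_input, d_input, h_output, d_output, stream):
    # Copy input to the device, enqueue inference, copy the result back,
    # all on a single CUDA stream, then block until the stream drains.
    cuda.memcpy_htod_async(d_input, h_input, stream)
    context.execute_async(bindings=[int(d_input), int(d_output)], stream_handle=stream.handle)
    cuda.memcpy_dtoh_async(h_output, d_output, stream)
    stream.synchronize()
    return h_output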
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    """Configuration for wrapping a timm model as a feature-extraction backbone."""

    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
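A brief usage sketch of the config above; hedged: the resnet18 backbone name and out_indices values are illustrative, and TimmBackbone requires the timm package at runtime.

from transformers import TimmBackbone, TimmBackboneConfig

# Wrap a timm model as a transformers backbone that returns feature maps.
config = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3, 4))
backbone = TimmBackbone(config)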
def bfs(graph, source, sink, parent):
    """Breadth-first search for an augmenting path; fills parent[] and
    returns True when the sink is reachable in the residual graph."""
    visited = [False] * len(graph)
    queue = [source]
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Maximum flow via BFS augmenting paths (the Edmonds-Karp variant).
    Note: the capacity matrix `graph` is mutated into its residual form."""
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        # Bottleneck: minimum residual capacity along the augmenting path.
        path_flow = float("Inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities (forward edges shrink, back edges grow).
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
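A quick sanity check of the routine above: the expected value 23 is the well-known maximum flow of this classic CLRS example network. Since ford_fulkerson mutates its capacity matrix, the check rebuilds the matrix instead of reusing graph.

capacities = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert ford_fulkerson(capacities, 0, 5) == 23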
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n (every divisor except n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Project Euler 21: sum all amicable numbers below `limit`. A number i is
    amicable when sum_of_divisors(sum_of_divisors(i)) == i and the pair differs."""
    return sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
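As a concrete check, 220 and 284 form the smallest amicable pair: the proper divisors of each sum to the other, which is exactly the property solution() scans for.

assert sum_of_divisors(220) == 284  # 1+2+4+5+10+11+20+22+44+55+110
assert sum_of_divisors(284) == 220  # 1+2+4+71+142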
import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) snake_case = [ """cross_validation.py""", """gradient_accumulation.py""", """local_sgd.py""", """multi_process_metrics.py""", """memory.py""", """automatic_gradient_accumulation.py""", """fsdp_with_peak_mem_tracking.py""", """deepspeed_with_config_support.py""", """megatron_lm_gpt_pretraining.py""", ] class SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _A ( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : bool , UpperCAmelCase_ : str = None , UpperCAmelCase_ : list = None ): SCREAMING_SNAKE_CASE : Optional[Any] = None SCREAMING_SNAKE_CASE : Any = os.path.abspath(os.path.join("examples" , "by_feature" ) ) SCREAMING_SNAKE_CASE : List[Any] = os.path.abspath("examples" ) for item in os.listdir(UpperCAmelCase_ ): if item not in EXCLUDE_EXAMPLES: SCREAMING_SNAKE_CASE : str = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) if os.path.isfile(UpperCAmelCase_ ) and ".py" in item_path: with self.subTest( tested_script=UpperCAmelCase_ , feature_script=UpperCAmelCase_ , tested_section="main()" if parser_only else "training_function()" , ): SCREAMING_SNAKE_CASE : str = compare_against_test( os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = "\n".join(UpperCAmelCase_ ) if special_strings is not None: for string in special_strings: SCREAMING_SNAKE_CASE : Union[str, Any] = diff.replace(UpperCAmelCase_ , "" ) self.assertEqual(UpperCAmelCase_ , "" ) def _A ( self : Any ): self.one_complete_example("complete_nlp_example.py" , UpperCAmelCase_ ) self.one_complete_example("complete_nlp_example.py" , UpperCAmelCase_ ) def _A ( self : str ): SCREAMING_SNAKE_CASE : str = os.path.abspath(os.path.join("examples" , "cv_example.py" ) ) SCREAMING_SNAKE_CASE : Dict = [ " " * 16 + "{\n\n", " " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n", " " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n", " " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n", " " * 20 + "\"epoch\": epoch,\n\n", " " * 16 + "},\n\n", " " * 16 + "step=epoch,\n", " " * 12, " " * 8 + "for step, batch in enumerate(active_dataloader):\n", ] self.one_complete_example("complete_cv_example.py" , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) self.one_complete_example("complete_cv_example.py" , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) @mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} ) class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : str = False @classmethod def _A ( cls : str ): super().setUpClass() SCREAMING_SNAKE_CASE : List[str] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(cls._tmpdir , "default_config.yml" ) write_basic_config(save_location=cls.configPath ) SCREAMING_SNAKE_CASE : List[Any] = ["accelerate", "launch", "--config_file", cls.configPath] @classmethod def _A ( cls : Any ): super().tearDownClass() shutil.rmtree(cls._tmpdir ) def _A ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE : List[str] = f''' 
examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} '''.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "epoch_0" ) ) ) def _A ( self : List[Any] ): SCREAMING_SNAKE_CASE : List[Any] = f''' examples/by_feature/checkpointing.py --checkpointing_steps 1 --output_dir {self.tmpdir} '''.split() SCREAMING_SNAKE_CASE : List[Any] = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "step_2" ) ) ) def _A ( self : str ): SCREAMING_SNAKE_CASE : List[Any] = f''' examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )} '''.split() SCREAMING_SNAKE_CASE : List[str] = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase_ ) self.assertNotIn("epoch 0:" , UpperCAmelCase_ ) self.assertIn("epoch 1:" , UpperCAmelCase_ ) def _A ( self : Optional[Any] ): SCREAMING_SNAKE_CASE : Any = f''' examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )} '''.split() SCREAMING_SNAKE_CASE : Any = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase_ ) if torch.cuda.is_available(): SCREAMING_SNAKE_CASE : Any = torch.cuda.device_count() else: SCREAMING_SNAKE_CASE : Union[str, Any] = 1 if num_processes > 1: self.assertNotIn("epoch 0:" , UpperCAmelCase_ ) self.assertIn("epoch 1:" , UpperCAmelCase_ ) else: self.assertIn("epoch 0:" , UpperCAmelCase_ ) self.assertIn("epoch 1:" , UpperCAmelCase_ ) @slow def _A ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE : Optional[int] = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split() with mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "0"} ): SCREAMING_SNAKE_CASE : Dict = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = re.findall("({.+})" , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = [r for r in results if "accuracy" in r][-1] SCREAMING_SNAKE_CASE : Optional[Any] = ast.literal_eval(UpperCAmelCase_ ) self.assertGreaterEqual(results["accuracy"] , 0.75 ) def _A ( self : int ): SCREAMING_SNAKE_CASE : Optional[Any] = ["examples/by_feature/multi_process_metrics.py"] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _A ( self : List[str] ): with tempfile.TemporaryDirectory() as tmpdir: SCREAMING_SNAKE_CASE : Union[str, Any] = f''' examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} '''.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase_ , "tracking" ) ) ) def _A ( self : List[str] ): SCREAMING_SNAKE_CASE : Union[str, Any] = ["examples/by_feature/gradient_accumulation.py"] run_command(self._launch_args + testargs ) def _A ( self : str ): SCREAMING_SNAKE_CASE : str = ["examples/by_feature/local_sgd.py"] run_command(self._launch_args + testargs )
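The header comment above says each example's get_dataloaders is mocked so the tests run on tiny MRPC samples. A minimal sketch of that decorator pattern follows; training_script and the mock body are hypothetical stand-ins, not names from this repository.

from unittest import mock

def mocked_dataloaders(accelerator, batch_size=16):
    # Hypothetical stand-in that would return tiny train/eval dataloaders
    # so the example script finishes in seconds.
    ...

# Replace the example's dataloader factory for the duration of one test:
@mock.patch("training_script.get_dataloaders", mocked_dataloaders)
def test_training_script_runs():
    import training_script
    training_script.main()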
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
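_LazyModule defers the heavy submodule imports above until an attribute is first touched. Here is a library-agnostic sketch of the same idea using PEP 562's module-level __getattr__; the lazy_pkg names are hypothetical.

# lazy_pkg/__init__.py (hypothetical): import submodules only on first access.
import importlib

_SUBMODULES = {"heavy": "lazy_pkg.heavy"}  # exported attribute -> module path

def __getattr__(name):
    if name in _SUBMODULES:
        module = importlib.import_module(_SUBMODULES[name])
        globals()[name] = module  # cache so later lookups skip this hook
        return module
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")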
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    # dirname (not basename) so the sample image resolves next to this script
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
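The per-level mapping built above is histogram equalization: each gray level k is sent to round((L - 1) * CDF(k)). A compact NumPy-only restatement of that mapping, assuming img is a uint8 grayscale array:

import numpy as np

def equalize(img: np.ndarray, levels: int = 256) -> np.ndarray:
    hist = np.bincount(img.ravel(), minlength=levels)    # per-level pixel counts
    cdf = np.cumsum(hist) / img.size                     # cumulative distribution
    lut = np.round((levels - 1) * cdf).astype(np.uint8)  # level -> stretched level
    return lut[img]                                      # remap every pixel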
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: snake_case = None snake_case = logging.get_logger(__name__) snake_case = """▁""" snake_case = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} snake_case = { """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}, """tokenizer_file""": { """google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json""" }, } snake_case = { """google/pegasus-xsum""": 512, } class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES UpperCamelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int = PegasusTokenizer UpperCamelCase_ : str = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Optional[int]="<pad>" , UpperCAmelCase_ : int="</s>" , UpperCAmelCase_ : str="<unk>" , UpperCAmelCase_ : str="<mask_2>" , UpperCAmelCase_ : Optional[int]="<mask_1>" , UpperCAmelCase_ : int=None , UpperCAmelCase_ : str=103 , **UpperCAmelCase_ : Optional[int] , ): SCREAMING_SNAKE_CASE : Optional[Any] = offset if additional_special_tokens is not None: if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): raise TypeError( f'''additional_special_tokens should be of type {type(UpperCAmelCase_ )}, but is''' f''' {type(UpperCAmelCase_ )}''' ) SCREAMING_SNAKE_CASE : Optional[Any] = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'''<unk_{i}>''' for i in range(len(UpperCAmelCase_ ) , self.offset - 1 ) ] if len(set(UpperCAmelCase_ ) ) != len(UpperCAmelCase_ ): raise ValueError( "Please make sure that the provided additional_special_tokens do not contain an incorrectly" f''' shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.''' ) SCREAMING_SNAKE_CASE : int = additional_special_tokens_extended else: SCREAMING_SNAKE_CASE : Tuple = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )] super().__init__( UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , mask_token_sent=UpperCAmelCase_ , offset=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : str = vocab_file SCREAMING_SNAKE_CASE : str = False if not self.vocab_file else True def _A ( self : Optional[Any] , UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : Optional[int] = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( "There should be 3 special tokens: mask_token, pad_token, and eos_token +" f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' ) return [1 if x in all_special_ids else 0 for x in seq] def _A ( self : int , UpperCAmelCase_ : List , UpperCAmelCase_ : Optional[List] = None , UpperCAmelCase_ : bool = False ): if already_has_special_tokens: return self._special_token_mask(UpperCAmelCase_ ) elif token_ids_a is None: return self._special_token_mask(UpperCAmelCase_ ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def _A ( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any=None ): if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _A ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(UpperCAmelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE : List[str] = os.path.join( UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ): copyfile(self.vocab_file , UpperCAmelCase_ ) return (out_vocab_file,)
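For orientation, a typical round trip with the fast tokenizer defined above; google/pegasus-xsum is the checkpoint already named in its vocab map, and the call/decode signatures are the standard tokenizer API.

from transformers import PegasusTokenizerFast

tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
batch = tokenizer(["PEGASUS pre-trains with gap-sentence generation."], return_tensors="pt")
text = tokenizer.batch_decode(batch["input_ids"], skip_special_tokens=True)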
import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 snake_case = { """return_dict""": False, """output_hidden_states""": True, """output_attentions""": True, """torchscript""": True, """torch_dtype""": """float16""", """use_bfloat16""": True, """tf_legacy_loss""": True, """pruned_heads""": {"""a""": 1}, """tie_word_embeddings""": False, """is_decoder""": True, """cross_attention_hidden_size""": 128, """add_cross_attention""": True, """tie_encoder_decoder""": True, """max_length""": 50, """min_length""": 3, """do_sample""": True, """early_stopping""": True, """num_beams""": 3, """num_beam_groups""": 3, """diversity_penalty""": 0.5, """temperature""": 2.0, """top_k""": 10, """top_p""": 0.7, """typical_p""": 0.2, """repetition_penalty""": 0.8, """length_penalty""": 0.8, """no_repeat_ngram_size""": 5, """encoder_no_repeat_ngram_size""": 5, """bad_words_ids""": [1, 2, 3], """num_return_sequences""": 3, """chunk_size_feed_forward""": 5, """output_scores""": True, """return_dict_in_generate""": True, """forced_bos_token_id""": 2, """forced_eos_token_id""": 3, """remove_invalid_values""": True, """architectures""": ["""BertModel"""], """finetuning_task""": """translation""", """id2label""": {0: """label"""}, """label2id""": {"""label""": """0"""}, """tokenizer_class""": """BertTokenizerFast""", """prefix""": """prefix""", """bos_token_id""": 6, """pad_token_id""": 7, """eos_token_id""": 8, """sep_token_id""": 9, """decoder_start_token_id""": 10, """exponential_decay_length_penalty""": (5, 1.01), """suppress_tokens""": [0, 1], """begin_suppress_tokens""": 2, """task_specific_params""": {"""translation""": """some_params"""}, """problem_type""": """regression""", } @is_staging_test class SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @classmethod def _A ( cls : Optional[Any] ): SCREAMING_SNAKE_CASE : Tuple = TOKEN HfFolder.save_token(UpperCAmelCase_ ) @classmethod def _A ( cls : Optional[int] ): try: delete_repo(token=cls._token , repo_id="test-config" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-config-org" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-config" ) except HTTPError: pass def _A ( self : Dict ): SCREAMING_SNAKE_CASE : List[str] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub("test-config" , use_auth_token=self._token ) SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) ) # Reset repo delete_repo(token=self._token , repo_id="test-config" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(UpperCAmelCase_ , repo_id="test-config" , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token ) SCREAMING_SNAKE_CASE : Optional[Any] = 
BertConfig.from_pretrained(f'''{USER}/test-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) ) def _A ( self : Optional[Any] ): SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token ) SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig.from_pretrained("valid_org/test-config-org" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-config-org" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( UpperCAmelCase_ , repo_id="valid_org/test-config-org" , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token ) SCREAMING_SNAKE_CASE : Any = BertConfig.from_pretrained("valid_org/test-config-org" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) ) def _A ( self : List[Any] ): CustomConfig.register_for_auto_class() SCREAMING_SNAKE_CASE : Union[str, Any] = CustomConfig(attribute=42 ) config.push_to_hub("test-dynamic-config" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} ) SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=UpperCAmelCase_ ) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__ , "CustomConfig" ) self.assertEqual(new_config.attribute , 42 ) class SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _A ( self : List[str] ): SCREAMING_SNAKE_CASE : Any = GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated SCREAMING_SNAKE_CASE : Optional[int] = c.n_embd + 1 # int SCREAMING_SNAKE_CASE : Any = c.resid_pdrop + 1.0 # float SCREAMING_SNAKE_CASE : Optional[int] = not c.scale_attn_weights # bool SCREAMING_SNAKE_CASE : Union[str, Any] = c.summary_type + "foo" # str c.update_from_string( f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' ) self.assertEqual(UpperCAmelCase_ , c.n_embd , "mismatch for key: n_embd" ) self.assertEqual(UpperCAmelCase_ , c.resid_pdrop , "mismatch for key: resid_pdrop" ) self.assertEqual(UpperCAmelCase_ , c.scale_attn_weights , "mismatch for key: scale_attn_weights" ) self.assertEqual(UpperCAmelCase_ , c.summary_type , "mismatch for key: summary_type" ) def _A ( self : int ): SCREAMING_SNAKE_CASE : List[str] = PretrainedConfig() SCREAMING_SNAKE_CASE : Optional[int] = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. 
self.assertListEqual( UpperCAmelCase_ , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] ) SCREAMING_SNAKE_CASE : str = [key for key, value in config_common_kwargs.items() if value == getattr(UpperCAmelCase_ , UpperCAmelCase_ )] if len(UpperCAmelCase_ ) > 0: raise ValueError( "The following keys are set with the default values in" " `test_configuration_common.config_common_kwargs` pick another value for them:" f''' {', '.join(UpperCAmelCase_ )}.''' ) def _A ( self : str ): with self.assertRaises(UpperCAmelCase_ ): # config is in subfolder, the following should not work without specifying the subfolder SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" ) SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" ) self.assertIsNotNone(UpperCAmelCase_ ) def _A ( self : Optional[int] ): # A mock response for an HTTP head request to emulate server down SCREAMING_SNAKE_CASE : Tuple = mock.Mock() SCREAMING_SNAKE_CASE : str = 500 SCREAMING_SNAKE_CASE : str = {} SCREAMING_SNAKE_CASE : Dict = HTTPError SCREAMING_SNAKE_CASE : Dict = {} # Download this model to make sure it's in the cache. SCREAMING_SNAKE_CASE : Optional[int] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request" , return_value=UpperCAmelCase_ ) as mock_head: SCREAMING_SNAKE_CASE : Any = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" ) # This check we did call the fake head request mock_head.assert_called() def _A ( self : Any ): # This test is for deprecated behavior and can be removed in v5 SCREAMING_SNAKE_CASE : Optional[int] = BertConfig.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" ) def _A ( self : Tuple ): SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained("bert-base-cased" ) SCREAMING_SNAKE_CASE : List[Any] = ["config.4.0.0.json"] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = 2 json.dump(configuration.to_dict() , open(os.path.join(UpperCAmelCase_ , "config.4.0.0.json" ) , "w" ) ) # This should pick the new configuration file as the version of Transformers is > 4.0.0 SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(UpperCAmelCase_ ) self.assertEqual(new_configuration.hidden_size , 2 ) # Will need to be adjusted if we reach v42 and this test is still here. # Should pick the old configuration file as the version of Transformers is < 4.42.0 SCREAMING_SNAKE_CASE : str = ["config.42.0.0.json"] SCREAMING_SNAKE_CASE : List[str] = 768 configuration.save_pretrained(UpperCAmelCase_ ) shutil.move(os.path.join(UpperCAmelCase_ , "config.4.0.0.json" ) , os.path.join(UpperCAmelCase_ , "config.42.0.0.json" ) ) SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(UpperCAmelCase_ ) self.assertEqual(new_configuration.hidden_size , 768 ) def _A ( self : Dict ): # This repo has two configuration files, one for v4.0.0 and above with a different hidden size. 
SCREAMING_SNAKE_CASE : Dict = "hf-internal-testing/test-two-configs" import transformers as new_transformers SCREAMING_SNAKE_CASE : Optional[int] = "v4.0.0" SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = new_transformers.models.auto.AutoConfig.from_pretrained( UpperCAmelCase_ , return_unused_kwargs=UpperCAmelCase_ ) self.assertEqual(new_configuration.hidden_size , 2 ) # This checks `_configuration_file` ia not kept in the kwargs by mistake. self.assertDictEqual(UpperCAmelCase_ , {} ) # Testing an older version by monkey-patching the version in the module it's used. import transformers as old_transformers SCREAMING_SNAKE_CASE : int = "v3.0.0" SCREAMING_SNAKE_CASE : Any = old_transformers.models.auto.AutoConfig.from_pretrained(UpperCAmelCase_ ) self.assertEqual(old_configuration.hidden_size , 768 )
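One of the behaviors tested above, update_from_string, parses comma-separated key=value pairs and coerces each value back to its field's declared type. A compact hedged illustration with the same GPT2Config fields the test touches:

from transformers import GPT2Config

c = GPT2Config()
c.update_from_string("n_embd=1024,resid_pdrop=0.2,scale_attn_weights=false,summary_type=foo")
# Each value was coerced to the type of the existing attribute: int, float, bool, str.
assert (c.n_embd, c.resid_pdrop, c.scale_attn_weights, c.summary_type) == (1024, 0.2, False, "foo")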
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
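As a usage illustration, the documented composition entry point pairs any speech encoder with any text decoder; the checkpoint names below are placeholders chosen for the sketch.

from transformers import SpeechEncoderDecoderModel

model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
    "facebook/wav2vec2-base-960h",  # speech encoder (illustrative checkpoint)
    "bert-base-uncased",            # text decoder (illustrative checkpoint)
)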
import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging snake_case = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : List[str] = ['''input_features'''] def __init__( self : Any , UpperCAmelCase_ : List[str]=80 , UpperCAmelCase_ : str=1_6000 , UpperCAmelCase_ : Optional[int]=160 , UpperCAmelCase_ : int=30 , UpperCAmelCase_ : Union[str, Any]=400 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : Tuple=False , **UpperCAmelCase_ : Tuple , ): super().__init__( feature_size=UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , padding_value=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , **UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Any = n_fft SCREAMING_SNAKE_CASE : List[Any] = hop_length SCREAMING_SNAKE_CASE : List[str] = chunk_length SCREAMING_SNAKE_CASE : Optional[Any] = chunk_length * sampling_rate SCREAMING_SNAKE_CASE : List[Any] = self.n_samples // hop_length SCREAMING_SNAKE_CASE : Union[str, Any] = sampling_rate SCREAMING_SNAKE_CASE : Optional[Any] = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=UpperCAmelCase_ , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=UpperCAmelCase_ , norm="slaney" , mel_scale="slaney" , ) def _A ( self : Optional[int] , UpperCAmelCase_ : np.array ): SCREAMING_SNAKE_CASE : List[Any] = spectrogram( UpperCAmelCase_ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , ) SCREAMING_SNAKE_CASE : List[str] = log_spec[:, :-1] SCREAMING_SNAKE_CASE : Tuple = np.maximum(UpperCAmelCase_ , log_spec.max() - 8.0 ) SCREAMING_SNAKE_CASE : Dict = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def _A ( UpperCAmelCase_ : List[np.ndarray] , UpperCAmelCase_ : List[np.ndarray] , UpperCAmelCase_ : float = 0.0 ): if attention_mask is not None: SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(UpperCAmelCase_ , np.intaa ) SCREAMING_SNAKE_CASE : int = [] for vector, length in zip(UpperCAmelCase_ , attention_mask.sum(-1 ) ): SCREAMING_SNAKE_CASE : Dict = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: SCREAMING_SNAKE_CASE : Optional[Any] = padding_value normed_input_values.append(UpperCAmelCase_ ) else: SCREAMING_SNAKE_CASE : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def __call__( self : List[Any] , UpperCAmelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[str] = "max_length" , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[bool] = None , **UpperCAmelCase_ : Union[str, Any] , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' f''' sampling 
rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) SCREAMING_SNAKE_CASE : Any = isinstance(UpperCAmelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) SCREAMING_SNAKE_CASE : List[Any] = is_batched_numpy or ( isinstance(UpperCAmelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: SCREAMING_SNAKE_CASE : int = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(UpperCAmelCase_ , np.ndarray ): SCREAMING_SNAKE_CASE : List[str] = np.asarray(UpperCAmelCase_ , dtype=np.floataa ) elif isinstance(UpperCAmelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): SCREAMING_SNAKE_CASE : str = raw_speech.astype(np.floataa ) # always return batch if not is_batched: SCREAMING_SNAKE_CASE : Any = [np.asarray([raw_speech] ).T] SCREAMING_SNAKE_CASE : Optional[Any] = BatchFeature({"input_features": raw_speech} ) # convert into correct format for padding SCREAMING_SNAKE_CASE : Tuple = self.pad( UpperCAmelCase_ , padding=UpperCAmelCase_ , max_length=max_length if max_length else self.n_samples , truncation=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: SCREAMING_SNAKE_CASE : List[str] = self.zero_mean_unit_var_norm( padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , ) SCREAMING_SNAKE_CASE : int = np.stack(padded_inputs["input_features"] , axis=0 ) # make sure list is in array format SCREAMING_SNAKE_CASE : Union[str, Any] = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 ) SCREAMING_SNAKE_CASE : int = [self._np_extract_fbank_features(UpperCAmelCase_ ) for waveform in input_features[0]] if isinstance(input_features[0] , UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : List[str] = [np.asarray(UpperCAmelCase_ , dtype=np.floataa ) for feature in input_features] else: SCREAMING_SNAKE_CASE : int = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) SCREAMING_SNAKE_CASE : List[Any] = padded_inputs["attention_mask"][:, :: self.hop_length] if return_tensors is not None: SCREAMING_SNAKE_CASE : Optional[Any] = padded_inputs.convert_to_tensors(UpperCAmelCase_ ) return padded_inputs def _A ( self : str ): SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE : List[Any] = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
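Driving the extractor above looks like any other feature extractor; a short hedged sketch follows. The (1, 80, 3000) shape follows from 80 mel bins and 30 s * 16000 Hz / 160 hop = 3000 frames, the defaults defined above; the checkpoint name is illustrative.

import numpy as np
from transformers import WhisperFeatureExtractor

extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-tiny")
audio = np.zeros(16000, dtype=np.float32)   # one second of silence at 16 kHz
features = extractor(audio, sampling_rate=16000, return_tensors="np")
print(features["input_features"].shape)     # (1, 80, 3000): padded to 30 s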
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUs (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be run before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
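# Aside: a minimal, self-contained sketch of what the `accelerator.accumulate(model)`
# context above buys you — scale the loss by the number of accumulation steps and only
# step the optimizer on accumulation boundaries. This is an illustrative re-implementation
# with made-up data, not the Accelerate internals.
import torch
from torch import nn

sketch_model = nn.Linear(4, 1)
sketch_optimizer = torch.optim.SGD(sketch_model.parameters(), lr=0.1)
accumulation_steps = 4
sketch_batches = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(8)]

for step, (x, y) in enumerate(sketch_batches):
    loss = nn.functional.mse_loss(sketch_model(x), y) / accumulation_steps  # average over micro-batches
    loss.backward()  # gradients accumulate in .grad across micro-batches
    if (step + 1) % accumulation_steps == 0:
        sketch_optimizer.step()  # one real update per `accumulation_steps` micro-batches
        sketch_optimizer.zero_grad()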
import functools


def mincost_tickets(days: list, costs: list) -> int:
    """Return the minimum cost of travel passes covering every day in `days`,
    given `costs` = [1-day ticket, 7-day pass, 30-day pass]."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
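# A quick usage check for the function above; the instance and expected cost (11) are
# the classic example for this problem: a 7-day pass bought on day 1 covers days 1-7,
# then two 1-day tickets cover days 8 and 20.
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11
assert mincost_tickets([], [2, 7, 15]) == 0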
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def perfect_cube(n: int) -> bool:
    """Return True if `n` is a perfect cube, using floating-point arithmetic."""
    val = n ** (1 / 3)
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))  # False
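# Note: the float-based check above can fail for some cubes because `n ** (1 / 3)` is
# inexact (e.g. 64 ** (1 / 3) == 3.9999999999999996, so perfect_cube(64) is False).
# A sketch of an exact variant that rounds the float estimate and re-checks in integers:
def perfect_cube_exact(n: int) -> bool:
    root = round(abs(n) ** (1 / 3))  # float estimate, then verify exactly
    return root**3 == abs(n)


assert perfect_cube_exact(64)
assert not perfect_cube_exact(4)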
import argparse
from collections import OrderedDict
from pathlib import Path

import torch

from transformers import (
    VisualBertConfig,
    VisualBertForMultipleChoice,
    VisualBertForPreTraining,
    VisualBertForQuestionAnswering,
    VisualBertForVisualReasoning,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]


def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d


@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original checkpoint's weights to the VisualBERT structure."""
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
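# Illustrative invocation of the conversion script above; the script filename and the
# local paths are assumptions for the example, but the checkpoint file name must be one
# of ACCEPTABLE_CHECKPOINTS:
#
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       vqa_fine_tuned.th ./visual_bert_vqa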
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    """Wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor."""

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
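# A minimal usage sketch for the processor above. The checkpoint name and the 48 kHz
# fake waveform are illustrative assumptions; any CLAP checkpoint with a matching
# feature extractor would do.
# from transformers import ClapProcessor
# import numpy as np
#
# processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
# audio = np.random.randn(48_000).astype("float32")  # ~1 s of dummy audio at 48 kHz
# inputs = processor(text=["a dog barking"], audios=[audio], sampling_rate=48_000, return_tensors="pt")
# # -> tokenizer outputs plus "input_features" from the feature extractor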
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}


class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
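# A minimal round-trip sketch of the reader/writer pair exercised above, outside pytest;
# the local file path is illustrative.
# from datasets import Dataset
# from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter
#
# ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
# ParquetDatasetWriter(ds, "data.parquet").write()  # returns the number of bytes written
# reloaded = ParquetDatasetReader("data.parquet").read()
# assert reloaded.column_names == ["col_1", "col_2", "col_3"]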
import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
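# Plain usage of the API exercised by the tests above; this needs `joblibspark` and a
# running Spark session, so treat it as an illustrative sketch rather than something
# to paste into CI.
# from datasets.parallel import parallel_backend
# from datasets.utils.py_utils import map_nested
#
# def double(x):  # top-level so the backend can pickle it
#     return x * 2
#
# with parallel_backend("spark"):
#     doubled = map_nested(double, {"a": [1, 2], "b": [3, 4]}, num_proc=2)
#     # -> {"a": [2, 4], "b": [6, 8]}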
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def compute_ap(l):  # noqa: E741
    """Print the articulation points of the undirected graph given as adjacency list `l`."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # A DFS root is an articulation point only if it has more than one outgoing tree edge
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
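# For the sample graph above, the DFS marks vertices 2, 3 and 5 as articulation points:
# removing 2 separates {0, 1} from the rest, removing 3 isolates 4, and removing 5 cuts
# off the cycle {6, 7, 8}. So the program prints 2, 3 and 5.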
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: each pass bubbles the current maximum to the end."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
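# Usage: the recursion depth is at most len(list_data), since each pass shrinks the
# unsorted prefix by one and the early-exit returns as soon as a pass makes no swaps.
print(bubble_sort([0, 5, 2, 3, 2]))  # -> [0, 2, 2, 3, 5]
print(bubble_sort([-2, -45, -5]))  # -> [-45, -5, -2]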
import json
import os
import unittest
from typing import Tuple

from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer

from ...test_tokenization_common import TokenizerTesterMixin


@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = (
            "<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
            "ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
            "ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
            "oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
            "pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
            "yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
            "əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
            "ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
            "ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
            "uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
            "ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
            "ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
            "ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
        ).split(" ")
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    # overwrite since phonemes require specific creation
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt

        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)

        return output_txt, output_ids

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token

        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa

        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)

    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")

    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |")

    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on

        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])

    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)
        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)

    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None
        )
        input_text = "Hello how are you"

        input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids
        self.assertNotEqual(input_ids_en, input_ids_fr)

        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)
        self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
        self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")

    def test_case_insensitive(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text_up = "Hello how Are you"
        input_text_low = "hello how are you"
        input_ids_up = tokenizer(input_text_up).input_ids
        input_ids_low = tokenizer(input_text_low).input_ids
        self.assertEqual(input_ids_up, input_ids_low)

    def test_tokenizer_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        tokenizer.add_tokens(["!", "?"])
        tokenizer.add_special_tokens({"cls_token": "$$$"})

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on

        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        tokenizer.add_tokens("|")

        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on

        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue("text" in outputs)
        self.assertTrue("char_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))

        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"]
        )

        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16]
        )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17]
        )

    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )

            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on

        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`

        # char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)

    @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes")
    def test_added_tokens_do_lower_case(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes")
    def test_encode_decode_with_spaces(self):
        pass

    @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency")
    def test_internal_consistency(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing")
    def test_pretrained_model_lists(self):
        pass

    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_tf_encode_plus_sent_to_model(self):
        pass

    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_torch_encode_plus_sent_to_model(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
        # is not the case for Wav2Vec2PhonemeCTCTokenizer.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                output = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(output["text"], str)
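# The checkpoint exercised above in one self-contained call; this requires the
# `phonemizer` package with an espeak backend installed, and the expected output is the
# one asserted by test_phonemize.
# from transformers import Wav2Vec2PhonemeCTCTokenizer
#
# tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
# print(tokenizer.phonemize("Hello how are you", phonemizer_lang="en-us"))
# # -> "h ə l oʊ h aʊ ɑːɹ j uː"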
import inspect

import jax
import jax.lax as lax
import jax.numpy as jnp

from ..utils import add_start_docstrings
from ..utils.logging import get_logger


logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
            search or log softmax for each vocabulary token when using beam search
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""


class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    """A list of processors/warpers that applies each of them in turn to the scores."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores


class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """Logits warper for temperature (exponential scaling of the output probability distribution)."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores


class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """Logits warper that keeps the smallest set of tokens whose cumulative probability exceeds `top_p`."""

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores


class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """Logits warper that keeps only the `top_k` highest-probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores


class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that forces `bos_token_id` to be sampled as the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that forces `eos_token_id` when `max_length` is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores
1 - jnp.bool_(cur_len - self.max_length + 1 ) SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where(UpperCAmelCase_ , new_scores.at[:, self.eos_token_id].set(0 ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or min_length < 0: raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' ) if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or eos_token_id < 0: raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' ) SCREAMING_SNAKE_CASE : List[str] = min_length SCREAMING_SNAKE_CASE : Tuple = eos_token_id def __call__( self : Optional[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): # create boolean flag to decide if min length penalty should be applied SCREAMING_SNAKE_CASE : Optional[int] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) SCREAMING_SNAKE_CASE : Optional[int] = jnp.where(UpperCAmelCase_ , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Optional[Any] = list(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = begin_index def __call__( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Union[str, Any] = 1 - jnp.bool_(cur_len - self.begin_index ) SCREAMING_SNAKE_CASE : List[str] = jnp.where(UpperCAmelCase_ , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase_ : list ): SCREAMING_SNAKE_CASE : List[Any] = list(UpperCAmelCase_ ) def __call__( self : Any , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Tuple = scores.at[..., self.suppress_tokens].set(-float("inf" ) ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any ): SCREAMING_SNAKE_CASE : List[Any] = dict(UpperCAmelCase_ ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. 
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: SCREAMING_SNAKE_CASE : Any = force_token_array.at[index].set(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = jnp.intaa(UpperCAmelCase_ ) def __call__( self : Tuple , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): def _force_token(UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : List[str] = scores.shape[0] SCREAMING_SNAKE_CASE : Optional[int] = self.force_token_array[generation_idx] SCREAMING_SNAKE_CASE : Tuple = jnp.ones_like(UpperCAmelCase_ , dtype=scores.dtype ) * -float("inf" ) SCREAMING_SNAKE_CASE : Dict = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) SCREAMING_SNAKE_CASE : Optional[Any] = lax.dynamic_update_slice(UpperCAmelCase_ , UpperCAmelCase_ , (0, current_token) ) return new_scores SCREAMING_SNAKE_CASE : Any = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(UpperCAmelCase_ ) , lambda: scores , ) , ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : Union[str, Any] = generate_config.eos_token_id SCREAMING_SNAKE_CASE : Tuple = generate_config.no_timestamps_token_id SCREAMING_SNAKE_CASE : List[Any] = generate_config.no_timestamps_token_id + 1 SCREAMING_SNAKE_CASE : Dict = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(UpperCAmelCase_ , "max_initial_timestamp_index" ): SCREAMING_SNAKE_CASE : List[Any] = generate_config.max_initial_timestamp_index else: SCREAMING_SNAKE_CASE : List[str] = model_config.vocab_size if self.max_initial_timestamp_index is None: SCREAMING_SNAKE_CASE : List[str] = model_config.vocab_size def __call__( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ): # suppress <|notimestamps|> which is handled by without_timestamps SCREAMING_SNAKE_CASE : int = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) ) def handle_pairs(UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] ): SCREAMING_SNAKE_CASE : Tuple = jnp.where((cur_len - self.begin_index) >= 1 , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Tuple = jnp.where((cur_len - self.begin_index) < 2 , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , UpperCAmelCase_ , UpperCAmelCase_ , ) return jnp.where( UpperCAmelCase_ , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Optional[Any] = jax.vmap(UpperCAmelCase_ )(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(cur_len == self.begin_index , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : List[str] = self.timestamp_begin + self.max_initial_timestamp_index 
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where( UpperCAmelCase_ , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , UpperCAmelCase_ , ) # if sum of probability over timestamps is above any other token, sample timestamp SCREAMING_SNAKE_CASE : List[Any] = jax.nn.log_softmax(UpperCAmelCase_ , axis=-1 ) def handle_cumulative_probs(UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ): SCREAMING_SNAKE_CASE : Union[str, Any] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : List[str] = jax.vmap(UpperCAmelCase_ )(UpperCAmelCase_ , UpperCAmelCase_ ) return scores
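# A minimal composition sketch for the warpers defined above. The upstream public names are
# FlaxLogitsProcessorList, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper and
# FlaxTopPLogitsWarper; importing them from the top-level `transformers` namespace assumes a
# recent release with Flax installed.
import jax.numpy as jnp
from transformers import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
    FlaxTopPLogitsWarper,
)

warpers = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=50), FlaxTopPLogitsWarper(top_p=0.9)]
)
input_ids = jnp.zeros((1, 4), dtype=jnp.int32)  # dummy prompt of length 4
scores = jnp.ones((1, 100))                     # dummy logits over a 100-token vocabulary
warped = warpers(input_ids, scores, cur_len=4)  # each warper is applied in sequence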
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin snake_case = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE ( lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Tuple = XLMRobertaTokenizer UpperCamelCase_ : Union[str, Any] = XLMRobertaTokenizerFast UpperCamelCase_ : Optional[int] = True UpperCamelCase_ : Union[str, Any] = True def _A ( self : List[Any] ): super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE : List[str] = XLMRobertaTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def _A ( self : Optional[int] ): SCREAMING_SNAKE_CASE : Optional[int] = "<pad>" SCREAMING_SNAKE_CASE : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ ) def _A ( self : Optional[Any] ): SCREAMING_SNAKE_CASE : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(UpperCAmelCase_ ) , 1002 ) def _A ( self : Tuple ): self.assertEqual(self.get_tokenizer().vocab_size , 1002 ) def _A ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE : List[str] = XLMRobertaTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = tokenizer.tokenize("This is a test" ) self.assertListEqual(UpperCAmelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( UpperCAmelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) SCREAMING_SNAKE_CASE : List[str] = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) def _A ( self : Union[str, Any] ): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return SCREAMING_SNAKE_CASE : List[Any] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE : Tuple = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : int = tokenizer_r.save_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer_p.save_pretrained(UpperCAmelCase_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) SCREAMING_SNAKE_CASE : Any = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f ) self.assertSequenceEqual(UpperCAmelCase_ , UpperCAmelCase_ ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.from_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_p.from_pretrained(UpperCAmelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(UpperCAmelCase_ ) # Save tokenizer rust, legacy_format=True SCREAMING_SNAKE_CASE : Any = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : Dict = tokenizer_r.save_pretrained(UpperCAmelCase_ , legacy_format=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = tokenizer_p.save_pretrained(UpperCAmelCase_ ) # Checks it save with the same files self.assertSequenceEqual(UpperCAmelCase_ , UpperCAmelCase_ ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.from_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = tokenizer_p.from_pretrained(UpperCAmelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) ) 
shutil.rmtree(UpperCAmelCase_ ) # Save tokenizer rust, legacy_format=False SCREAMING_SNAKE_CASE : Optional[int] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : List[str] = tokenizer_r.save_pretrained(UpperCAmelCase_ , legacy_format=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_p.save_pretrained(UpperCAmelCase_ ) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE : List[str] = tokenizer_r.from_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = tokenizer_p.from_pretrained(UpperCAmelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) ) shutil.rmtree(UpperCAmelCase_ ) @cached_property def _A ( self : Optional[Any] ): return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base" ) def _A ( self : str ): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(UpperCAmelCase_ , f.name ) SCREAMING_SNAKE_CASE : Tuple = XLMRobertaTokenizer(f.name , keep_accents=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = pickle.dumps(UpperCAmelCase_ ) pickle.loads(UpperCAmelCase_ ) def _A ( self : str ): if not self.test_rust_tokenizer: return SCREAMING_SNAKE_CASE : str = self.get_tokenizer() SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE : Dict = "I was born in 92000, and this is falsé." SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.tokenize(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = rust_tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.encode(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @slow def _A ( self : Tuple ): SCREAMING_SNAKE_CASE : str = "Hello World!" SCREAMING_SNAKE_CASE : List[str] = [0, 3_5378, 6661, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(UpperCAmelCase_ , self.big_tokenizer.encode(UpperCAmelCase_ ) ) @slow def _A ( self : Optional[Any] ): SCREAMING_SNAKE_CASE : List[str] = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will" " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth" ) SCREAMING_SNAKE_CASE : int = [ 0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 17_9459, 12_4850, 4, 6044, 237, 12, 6, 5, 6, 4, 6780, 705, 15, 1388, 44, 378, 1_0114, 711, 152, 20, 6, 5, 2_2376, 642, 1221, 1_5190, 3_4153, 450, 5608, 959, 1119, 5_7702, 136, 186, 47, 1098, 2_9367, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 6044, 237, 6284, 5_0901, 528, 31, 90, 34, 927, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(UpperCAmelCase_ , self.big_tokenizer.encode(UpperCAmelCase_ ) ) @slow def _A ( self : Optional[Any] ): # fmt: off SCREAMING_SNAKE_CASE : Tuple = {"input_ids": [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase_ , model_name="xlm-roberta-base" , 
revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3" , )
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys os.environ["TF_CPP_MIN_LOG_LEVEL"] = """3""" print("""Python version:""", sys.version) print("""OS platform:""", platform.platform()) print("""OS architecture:""", platform.machine()) try: import torch print("""Torch version:""", torch.__version__) print("""Cuda available:""", torch.cuda.is_available()) print("""Cuda version:""", torch.version.cuda) print("""CuDNN version:""", torch.backends.cudnn.version()) print("""Number of GPUs available:""", torch.cuda.device_count()) except ImportError: print("""Torch version:""", None) try: import transformers print("""transformers version:""", transformers.__version__) except ImportError: print("""transformers version:""", None)
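# The same try/except pattern extends naturally to other libraries; a sketch adding one more
# report (whether `datasets` is installed is an assumption about the environment):
try:
    import datasets

    print("datasets version:", datasets.__version__)
except ImportError:
    print("datasets version:", None)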
import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip snake_case = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def lowerCamelCase__ ( lowercase ): """simple docstring""" if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" return max(metric_fn(lowercase , lowercase ) for gt in ground_truths ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = [line.strip() for line in open(lowercase , "r" ).readlines()] SCREAMING_SNAKE_CASE : Any = [] if args.gold_data_mode == "qa": SCREAMING_SNAKE_CASE : List[Any] = pd.read_csv(lowercase , sep="\t" , header=lowercase ) for answer_list in data[1]: SCREAMING_SNAKE_CASE : Any = ast.literal_eval(lowercase ) answers.append(lowercase ) else: SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(lowercase , "r" ).readlines()] SCREAMING_SNAKE_CASE : Tuple = [[reference] for reference in references] SCREAMING_SNAKE_CASE : List[Any] = 0 for prediction, ground_truths in zip(lowercase , lowercase ): total += 1 em += metric_max_over_ground_truths(lowercase , lowercase , lowercase ) fa += metric_max_over_ground_truths(lowercase , lowercase , lowercase ) SCREAMING_SNAKE_CASE : str = 100.0 * em / total SCREAMING_SNAKE_CASE : Dict = 100.0 * fa / total logger.info(F'''F1: {fa:.2f}''' ) logger.info(F'''EM: {em:.2f}''' ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = args.k SCREAMING_SNAKE_CASE : int = [line.strip() for line in open(lowercase , "r" ).readlines()] SCREAMING_SNAKE_CASE : List[str] = [line.strip() for line in open(lowercase , "r" ).readlines()] SCREAMING_SNAKE_CASE : Tuple = 0 for hypo, reference in zip(lowercase , lowercase ): SCREAMING_SNAKE_CASE : Union[str, Any] = set(hypo.split("\t" )[:k] ) SCREAMING_SNAKE_CASE : Union[str, Any] = set(reference.split("\t" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k SCREAMING_SNAKE_CASE : List[Any] = 100.0 * em / total logger.info(F'''Precision@{k}: {em: .2f}''' ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" def strip_title(lowercase ): if title.startswith("\"" ): SCREAMING_SNAKE_CASE : Dict = title[1:] if title.endswith("\"" ): SCREAMING_SNAKE_CASE : List[Any] = title[:-1] return title SCREAMING_SNAKE_CASE : str = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( lowercase , return_tensors="pt" , padding=lowercase , truncation=lowercase , )["input_ids"].to(args.device ) SCREAMING_SNAKE_CASE : List[Any] = rag_model.rag.question_encoder(lowercase ) SCREAMING_SNAKE_CASE : Tuple = question_enc_outputs[0] SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.retriever( lowercase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , ) SCREAMING_SNAKE_CASE : str 
= rag_model.retriever.index.get_doc_dicts(result.doc_ids ) SCREAMING_SNAKE_CASE : Tuple = [] for docs in all_docs: SCREAMING_SNAKE_CASE : Optional[Any] = [strip_title(lowercase ) for title in docs["title"]] provenance_strings.append("\t".join(lowercase ) ) return provenance_strings def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" with torch.no_grad(): SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( lowercase , return_tensors="pt" , padding=lowercase , truncation=lowercase ) SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.input_ids.to(args.device ) SCREAMING_SNAKE_CASE : Tuple = inputs_dict.attention_mask.to(args.device ) SCREAMING_SNAKE_CASE : str = rag_model.generate( # rag_model overwrites generate lowercase , attention_mask=lowercase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=lowercase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) SCREAMING_SNAKE_CASE : Any = rag_model.retriever.generator_tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) if args.print_predictions: for q, a in zip(lowercase , lowercase ): logger.info("Q: {} - A: {}".format(lowercase , lowercase ) ) return answers def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser() parser.add_argument( "--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=lowercase , help=( "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the" " model_name_or_path" ) , ) parser.add_argument( "--index_name" , default=lowercase , choices=["exact", "compressed", "legacy"] , type=lowercase , help="RAG model retriever type" , ) parser.add_argument( "--index_path" , default=lowercase , type=lowercase , help="Path to the retrieval index" , ) parser.add_argument("--n_docs" , default=5 , type=lowercase , help="Number of retrieved docs" ) parser.add_argument( "--model_name_or_path" , default=lowercase , type=lowercase , required=lowercase , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , ) parser.add_argument( "--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=lowercase , help=( "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates" " precision@k." 
) , ) parser.add_argument("--k" , default=1 , type=lowercase , help="k for the precision@k calculation" ) parser.add_argument( "--evaluation_set" , default=lowercase , type=lowercase , required=lowercase , help="Path to a file containing evaluation samples" , ) parser.add_argument( "--gold_data_path" , default=lowercase , type=lowercase , required=lowercase , help="Path to a tab-separated file with gold samples" , ) parser.add_argument( "--gold_data_mode" , default="qa" , type=lowercase , choices=["qa", "ans"] , help=( "Format of the gold data file" "qa - a single line in the following format: question [tab] answer_list" "ans - a single line of the gold file contains the expected answer string" ) , ) parser.add_argument( "--predictions_path" , type=lowercase , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , ) parser.add_argument( "--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , ) parser.add_argument( "--eval_batch_size" , default=8 , type=lowercase , help="Batch size per GPU/CPU for evaluation." , ) parser.add_argument( "--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , ) parser.add_argument( "--num_beams" , default=4 , type=lowercase , help="Number of beams to be used when generating answers" , ) parser.add_argument("--min_length" , default=1 , type=lowercase , help="Min length of the generated answers" ) parser.add_argument("--max_length" , default=50 , type=lowercase , help="Max length of the generated answers" ) parser.add_argument( "--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , ) parser.add_argument( "--print_docs" , action="store_true" , help="If True, prints docs retried while generating." 
, ) SCREAMING_SNAKE_CASE : Tuple = parser.parse_args() SCREAMING_SNAKE_CASE : Dict = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) return args def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = {} if args.model_type is None: SCREAMING_SNAKE_CASE : Any = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("rag" ): SCREAMING_SNAKE_CASE : Optional[Any] = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration SCREAMING_SNAKE_CASE : Tuple = args.n_docs if args.index_name is not None: SCREAMING_SNAKE_CASE : int = args.index_name if args.index_path is not None: SCREAMING_SNAKE_CASE : List[Any] = args.index_path else: SCREAMING_SNAKE_CASE : str = BartForConditionalGeneration SCREAMING_SNAKE_CASE : List[str] = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("Evaluate the following checkpoints: %s" , lowercase ) SCREAMING_SNAKE_CASE : List[str] = get_scores if args.eval_mode == "e2e" else get_precision_at_k SCREAMING_SNAKE_CASE : Optional[Any] = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) ) score_fn(lowercase , args.predictions_path , args.gold_data_path ) continue logger.info("***** Running evaluation for {} *****".format(lowercase ) ) logger.info(" Batch size = %d" , args.eval_batch_size ) logger.info(" Predictions will be stored under {}".format(args.predictions_path ) ) if args.model_type.startswith("rag" ): SCREAMING_SNAKE_CASE : Any = RagRetriever.from_pretrained(lowercase , **lowercase ) SCREAMING_SNAKE_CASE : List[str] = model_class.from_pretrained(lowercase , retriever=lowercase , **lowercase ) model.retriever.init_retrieval() else: SCREAMING_SNAKE_CASE : Tuple = model_class.from_pretrained(lowercase , **lowercase ) model.to(args.device ) with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file: SCREAMING_SNAKE_CASE : Optional[int] = [] for line in tqdm(lowercase ): questions.append(line.strip() ) if len(lowercase ) == args.eval_batch_size: SCREAMING_SNAKE_CASE : List[Any] = evaluate_batch_fn(lowercase , lowercase , lowercase ) preds_file.write("\n".join(lowercase ) + "\n" ) preds_file.flush() SCREAMING_SNAKE_CASE : Dict = [] if len(lowercase ) > 0: SCREAMING_SNAKE_CASE : int = evaluate_batch_fn(lowercase , lowercase , lowercase ) preds_file.write("\n".join(lowercase ) ) preds_file.flush() score_fn(lowercase , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": snake_case = get_args() main(args)
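# Example invocation of the evaluation script above (the script filename and data paths are
# placeholders; the flags are exactly the ones registered with argparse above):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-token-nq \
#       --model_type rag_token \
#       --evaluation_set path/to/questions.txt \
#       --gold_data_path path/to/gold.tsv \
#       --gold_data_mode qa \
#       --predictions_path predictions.txt \
#       --eval_mode e2e \
#       --n_docs 5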
# limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( """pipelines_utils""", """0.22.0""", """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""", standard_warn=False, stacklevel=3, )
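# A quick sanity check of the shim above (assumes `diffusers` is installed): the deprecated and
# the new import path resolve to the same class, so existing imports keep working while warning.
from diffusers.pipeline_utils import DiffusionPipeline as _old
from diffusers.pipelines.pipeline_utils import DiffusionPipeline as _new

assert _old is _new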
from __future__ import annotations snake_case = tuple[int, int, int] snake_case = tuple[str, str, str] # used alphabet -------------------------- # from string.ascii_uppercase snake_case = """ABCDEFGHIJKLMNOPQRSTUVWXYZ""" # -------------------------- default selection -------------------------- # rotors -------------------------- snake_case = """EGZWVONAHDCLFQMSIPJBYUKXTR""" snake_case = """FOBHMDKEXQNRAULPGSJVTYICZW""" snake_case = """ZJXESIUQLHAVRMDOYGTNFWPBKC""" # reflector -------------------------- snake_case = { """A""": """N""", """N""": """A""", """B""": """O""", """O""": """B""", """C""": """P""", """P""": """C""", """D""": """Q""", """Q""": """D""", """E""": """R""", """R""": """E""", """F""": """S""", """S""": """F""", """G""": """T""", """T""": """G""", """H""": """U""", """U""": """H""", """I""": """V""", """V""": """I""", """J""": """W""", """W""": """J""", """K""": """X""", """X""": """K""", """L""": """Y""", """Y""": """L""", """M""": """Z""", """Z""": """M""", } # -------------------------- extra rotors -------------------------- snake_case = """RMDJXFUWGISLHVTCQNKYPBEZOA""" snake_case = """SGLCPQWZHKXAREONTFBVIYJUDM""" snake_case = """HVSICLTYKQUBXDWAJZOMFGPREN""" snake_case = """RZWQHFMVDBKICJLNTUXAGYPSOE""" snake_case = """LFKIJODBEGAMQPXVUHYSTCZRWN""" snake_case = """KOAEGVDHXPQZMLFTYWJNBRCIUS""" def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" if (unique_rotsel := len(set(lowercase ) )) < 3: SCREAMING_SNAKE_CASE : Tuple = F'''Please use 3 unique rotors (not {unique_rotsel})''' raise Exception(lowercase ) # Checks if rotor positions are valid SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = rotpos if not 0 < rotorposa <= len(lowercase ): SCREAMING_SNAKE_CASE : Tuple = F'''First rotor position is not within range of 1..26 ({rotorposa}''' raise ValueError(lowercase ) if not 0 < rotorposa <= len(lowercase ): SCREAMING_SNAKE_CASE : List[str] = F'''Second rotor position is not within range of 1..26 ({rotorposa})''' raise ValueError(lowercase ) if not 0 < rotorposa <= len(lowercase ): SCREAMING_SNAKE_CASE : Any = F'''Third rotor position is not within range of 1..26 ({rotorposa})''' raise ValueError(lowercase ) # Validates string and returns dict SCREAMING_SNAKE_CASE : List[str] = _plugboard(lowercase ) return rotpos, rotsel, pbdict def lowerCamelCase__ ( lowercase ): """simple docstring""" if not isinstance(lowercase , lowercase ): SCREAMING_SNAKE_CASE : int = F'''Plugboard setting isn\'t type string ({type(lowercase )})''' raise TypeError(lowercase ) elif len(lowercase ) % 2 != 0: SCREAMING_SNAKE_CASE : List[str] = F'''Odd number of symbols ({len(lowercase )})''' raise Exception(lowercase ) elif pbstring == "": return {} pbstring.replace(" " , "" ) # Checks if all characters are unique SCREAMING_SNAKE_CASE : Any = set() for i in pbstring: if i not in abc: SCREAMING_SNAKE_CASE : Any = F'''\'{i}\' not in list of symbols''' raise Exception(lowercase ) elif i in tmppbl: SCREAMING_SNAKE_CASE : List[str] = F'''Duplicate symbol ({i})''' raise Exception(lowercase ) else: tmppbl.add(lowercase ) del tmppbl # Created the dictionary SCREAMING_SNAKE_CASE : List[Any] = {} for j in range(0 , len(lowercase ) - 1 , 2 ): SCREAMING_SNAKE_CASE : Optional[Any] = pbstring[j + 1] SCREAMING_SNAKE_CASE : Dict = pbstring[j] return pb def lowerCamelCase__ ( lowercase , lowercase , lowercase = (rotora, rotora, rotora) , lowercase = "" , ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = text.upper() SCREAMING_SNAKE_CASE , 
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = _validator( lowercase , lowercase , plugb.upper() ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = rotor_position SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = rotor_selection rotorposa -= 1 rotorposa -= 1 rotorposa -= 1 SCREAMING_SNAKE_CASE : Dict = [] # encryption/decryption process -------------------------- for symbol in text: if symbol in abc: # 1st plugboard -------------------------- if symbol in plugboard: SCREAMING_SNAKE_CASE : str = plugboard[symbol] # rotor ra -------------------------- SCREAMING_SNAKE_CASE : int = abc.index(lowercase ) + rotorposa SCREAMING_SNAKE_CASE : Dict = rotora[index % len(lowercase )] # rotor rb -------------------------- SCREAMING_SNAKE_CASE : str = abc.index(lowercase ) + rotorposa SCREAMING_SNAKE_CASE : str = rotora[index % len(lowercase )] # rotor rc -------------------------- SCREAMING_SNAKE_CASE : Tuple = abc.index(lowercase ) + rotorposa SCREAMING_SNAKE_CASE : List[Any] = rotora[index % len(lowercase )] # reflector -------------------------- # this is the reason you don't need another machine to decipher SCREAMING_SNAKE_CASE : Optional[int] = reflector[symbol] # 2nd rotors SCREAMING_SNAKE_CASE : List[Any] = abc[rotora.index(lowercase ) - rotorposa] SCREAMING_SNAKE_CASE : Tuple = abc[rotora.index(lowercase ) - rotorposa] SCREAMING_SNAKE_CASE : List[str] = abc[rotora.index(lowercase ) - rotorposa] # 2nd plugboard if symbol in plugboard: SCREAMING_SNAKE_CASE : Union[str, Any] = plugboard[symbol] # moves/resets rotor positions rotorposa += 1 if rotorposa >= len(lowercase ): SCREAMING_SNAKE_CASE : Optional[Any] = 0 rotorposa += 1 if rotorposa >= len(lowercase ): SCREAMING_SNAKE_CASE : List[str] = 0 rotorposa += 1 if rotorposa >= len(lowercase ): SCREAMING_SNAKE_CASE : List[Any] = 0 # else: # pass # Error could be also raised # raise ValueError( # 'Invalid symbol('+repr(symbol)+')') result.append(lowercase ) return "".join(lowercase ) if __name__ == "__main__": snake_case = """This is my Python script that emulates the Enigma machine from WWII.""" snake_case = (1, 1, 1) snake_case = """pictures""" snake_case = (rotora, rotora, rotora) snake_case = enigma(message, rotor_pos, rotor_sel, pb) print("""Encrypted message:""", en) print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
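# The machine is symmetric, so running the ciphertext back through the same initial state
# recovers the plaintext. A self-contained round-trip sketch of the intended behaviour; note
# that this dump collapsed several distinct identifiers (the three rotor strings and the three
# rotor-position counters all print as one name), so those must be disambiguated before the
# file runs as-is. The rotor strings below are the defaults listed at the top of the file.
ROTOR_1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
ROTOR_2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
ROTOR_3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"

message = "Attack at dawn"
state = ((1, 1, 1), (ROTOR_1, ROTOR_2, ROTOR_3), "pictures")
ciphertext = enigma(message, *state)
assert enigma(ciphertext, *state) == message.upper()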
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() snake_case = logging.get_logger(__name__) snake_case = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } snake_case = { """b0""": { """hidden_dim""": 1_280, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 224, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1_280, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 240, """dropout_rate""": 0.2, """dw_padding""": [16], }, """b2""": { """hidden_dim""": 1_408, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 260, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 16], }, """b3""": { """hidden_dim""": 1_536, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 300, """dropout_rate""": 0.3, """dw_padding""": [5, 18], }, """b4""": { """hidden_dim""": 1_792, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 380, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2_048, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 456, """dropout_rate""": 0.4, """dw_padding""": [13, 27], }, """b6""": { """hidden_dim""": 2_304, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 528, """dropout_rate""": 0.5, """dw_padding""": [31], }, """b7""": { """hidden_dim""": 2_560, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 600, """dropout_rate""": 0.5, """dw_padding""": [18], }, } def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : str = EfficientNetConfig() SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["hidden_dim"] SCREAMING_SNAKE_CASE : Tuple = CONFIG_MAP[model_name]["width_coef"] SCREAMING_SNAKE_CASE : Optional[int] = CONFIG_MAP[model_name]["depth_coef"] SCREAMING_SNAKE_CASE : Union[str, Any] = CONFIG_MAP[model_name]["image_size"] SCREAMING_SNAKE_CASE : Any = CONFIG_MAP[model_name]["dropout_rate"] SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["dw_padding"] SCREAMING_SNAKE_CASE : str = "huggingface/label-files" SCREAMING_SNAKE_CASE : str = "imagenet-1k-id2label.json" SCREAMING_SNAKE_CASE : str = 1000 SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) ) SCREAMING_SNAKE_CASE : Tuple = {int(lowercase ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Union[str, Any] = idalabel SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in idalabel.items()} return config def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" SCREAMING_SNAKE_CASE : List[Any] = Image.open(requests.get(lowercase , stream=lowercase ).raw ) return im def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"] SCREAMING_SNAKE_CASE : int = 
EfficientNetImageProcessor( size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowercase , ) return preprocessor def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )] SCREAMING_SNAKE_CASE : List[str] = sorted(set(lowercase ) ) SCREAMING_SNAKE_CASE : List[str] = len(lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = {b: str(lowercase ) for b, i in zip(lowercase , range(lowercase ) )} SCREAMING_SNAKE_CASE : Dict = [] rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") ) rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") ) rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") ) rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") ) rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") ) for b in block_names: SCREAMING_SNAKE_CASE : Tuple = block_name_mapping[b] rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") ) rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") ) 
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") ) rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") ) rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") ) SCREAMING_SNAKE_CASE : int = {} for item in rename_keys: if item[0] in original_param_names: SCREAMING_SNAKE_CASE : Any = "efficientnet." + item[1] SCREAMING_SNAKE_CASE : Optional[Any] = "classifier.weight" SCREAMING_SNAKE_CASE : List[str] = "classifier.bias" return key_mapping def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" for key, value in tf_params.items(): if "normalization" in key: continue SCREAMING_SNAKE_CASE : str = key_mapping[key] if "_conv" in key and "kernel" in key: SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowercase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(np.transpose(lowercase ) ) else: SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(lowercase ) @torch.no_grad() def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = model_classes[model_name]( include_top=lowercase , weights="imagenet" , input_tensor=lowercase , input_shape=lowercase , pooling=lowercase , classes=1000 , classifier_activation="softmax" , ) SCREAMING_SNAKE_CASE : List[Any] = original_model.trainable_variables SCREAMING_SNAKE_CASE : Dict = original_model.non_trainable_variables SCREAMING_SNAKE_CASE : Dict = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: SCREAMING_SNAKE_CASE : Tuple = param.numpy() SCREAMING_SNAKE_CASE : Tuple = list(tf_params.keys() ) # Load HuggingFace model SCREAMING_SNAKE_CASE : Tuple = get_efficientnet_config(lowercase ) SCREAMING_SNAKE_CASE : str = EfficientNetForImageClassification(lowercase ).eval() SCREAMING_SNAKE_CASE : Dict = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("Converting parameters..." ) SCREAMING_SNAKE_CASE : Dict = rename_keys(lowercase ) replace_params(lowercase , lowercase , lowercase ) # Initialize preprocessor and preprocess input image SCREAMING_SNAKE_CASE : Optional[int] = convert_image_processor(lowercase ) SCREAMING_SNAKE_CASE : int = preprocessor(images=prepare_img() , return_tensors="pt" ) # HF model inference hf_model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = hf_model(**lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = outputs.logits.detach().numpy() # Original model inference SCREAMING_SNAKE_CASE : int = False SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"] SCREAMING_SNAKE_CASE : Any = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) SCREAMING_SNAKE_CASE : Tuple = image.img_to_array(lowercase ) SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(lowercase , axis=0 ) SCREAMING_SNAKE_CASE : Any = original_model.predict(lowercase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(lowercase , lowercase , atol=1E-3 ), "The predicted logits are not the same." print("Model outputs match!" 
) if save_model: # Create folder to save model if not os.path.isdir(lowercase ): os.mkdir(lowercase ) # Save converted model and image processor hf_model.save_pretrained(lowercase ) preprocessor.save_pretrained(lowercase ) if push_to_hub: # Push model and image processor to hub print(F'''Pushing converted {model_name} to the hub...''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = F'''efficientnet-{model_name}''' preprocessor.push_to_hub(lowercase ) hf_model.push_to_hub(lowercase ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") snake_case = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
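# Example invocation of the conversion script above (the script filename is an assumption; the
# flags are the ones registered with argparse):
#
#   python convert_efficientnet_to_pytorch.py \
#       --model_name b0 \
#       --pytorch_dump_folder_path hf_model \
#       --save_model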
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation snake_case = logging.get_logger(__name__) snake_case = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} snake_case = { """vocab_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""", }, """merges_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""", }, """tokenizer_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""", }, } snake_case = { """gpt2""": 1_024, """gpt2-medium""": 1_024, """gpt2-large""": 1_024, """gpt2-xl""": 1_024, """distilgpt2""": 1_024, } class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Dict = VOCAB_FILES_NAMES UpperCamelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Tuple = ['''input_ids''', '''attention_mask'''] UpperCamelCase_ : str = GPTaTokenizer def __init__( self : Optional[Any] , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : int="<|endoftext|>" , UpperCAmelCase_ : int="<|endoftext|>" , UpperCAmelCase_ : Optional[int]="<|endoftext|>" , UpperCAmelCase_ : List[Any]=False , **UpperCAmelCase_ : List[str] , ): super().__init__( UpperCAmelCase_ , UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , **UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : List[str] = kwargs.pop("add_bos_token" , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , UpperCAmelCase_ ) != add_prefix_space: SCREAMING_SNAKE_CASE : Any = getattr(UpperCAmelCase_ , pre_tok_state.pop("type" ) ) SCREAMING_SNAKE_CASE : Any = add_prefix_space SCREAMING_SNAKE_CASE : Optional[Any] = pre_tok_class(**UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = add_prefix_space def _A ( self : Optional[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ): SCREAMING_SNAKE_CASE : str = 
kwargs.get("is_split_into_words" , UpperCAmelCase_ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ ) def _A ( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : List[Any] = kwargs.get("is_split_into_words" , UpperCAmelCase_ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ ) def _A ( self : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ): SCREAMING_SNAKE_CASE : List[str] = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_ ) return tuple(UpperCAmelCase_ ) def _A ( self : Optional[int] , UpperCAmelCase_ : "Conversation" ): SCREAMING_SNAKE_CASE : List[Any] = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) + [self.eos_token_id] ) if len(UpperCAmelCase_ ) > self.model_max_length: SCREAMING_SNAKE_CASE : int = input_ids[-self.model_max_length :] return input_ids
"""
Given a matrix that is sorted in decreasing order both row-wise and
column-wise, count the number of negative numbers it contains
(https://leetcode.com/problems/count-negative-numbers-in-a-sorted-matrix).
"""


def generate_large_matrix() -> list[list[int]]:
    """Generate a large grid that is sorted in decreasing order in both directions."""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number in a decreasingly sorted array."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives with a per-row binary search, reusing the previous row's bound."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by checking every value in the grid."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives row by row, stopping at the first negative in each row."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three counting functions against each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
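# Quick cross-check of the three counting strategies above on the first test
# grid (the LeetCode 1351 example); all of them should agree on 8 negatives.
example_grid = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
for counter in (
    count_negatives_binary_search,
    count_negatives_brute_force,
    count_negatives_brute_force_with_break,
):
    assert counter(example_grid) == 8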
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : int = ['''image_processor''', '''tokenizer'''] UpperCamelCase_ : str = '''ViltImageProcessor''' UpperCamelCase_ : List[str] = ('''BertTokenizer''', '''BertTokenizerFast''') def __init__( self : int , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Optional[int]=None , **UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Union[str, Any] = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Tuple = kwargs.pop("feature_extractor" ) SCREAMING_SNAKE_CASE : Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = self.image_processor def __call__( self : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase_ : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , **UpperCAmelCase_ : str , ): SCREAMING_SNAKE_CASE : Dict = self.tokenizer( text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , ) # add pixel_values + pixel_mask SCREAMING_SNAKE_CASE : List[str] = self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ ) encoding.update(UpperCAmelCase_ ) return encoding def _A ( self : Optional[int] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Any ): return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) def _A ( self : Any , *UpperCAmelCase_ : int , **UpperCAmelCase_ : str ): return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) @property def _A ( self : Any ): SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.model_input_names SCREAMING_SNAKE_CASE : str = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def _A ( self : int ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." 
, UpperCAmelCase_ , ) return self.image_processor_class @property def _A ( self : List[str] ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCAmelCase_ , ) return self.image_processor
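# Usage sketch for the processor above, assuming it is the class transformers
# exports as `ViltProcessor`; the checkpoint name and image path are
# illustrative stand-ins.
from PIL import Image

from transformers import ViltProcessor

vilt_processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
image = Image.open("example.jpg")
inputs = vilt_processor(image, "How many cats are there?", return_tensors="pt")
# `inputs` holds input_ids/attention_mask from the tokenizer plus
# pixel_values/pixel_mask from the image processor.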
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    # DialoGPT checkpoints store the LM head under an old key name; rename it
    # so the state dict loads into the current transformers GPT-2 architecture.
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
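# Programmatic use of the converter above; the .pkl path is a hypothetical
# stand-in for a DialoGPT `*_ft.pkl` checkpoint downloaded beforehand:
#
#     convert_dialogpt_checkpoint("small_ft.pkl", "./DialoGPT-small")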
import numpy as np from scipy.spatial.distance import cdist from sklearn.metrics import fa_score import datasets snake_case = """\ @inproceedings{kakwani2020indicnlpsuite, title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}}, author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar}, year={2020}, booktitle={Findings of EMNLP}, } """ snake_case = """\ IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te. """ snake_case = """ Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset. Args: predictions: list of predictions to score (as int64), except for 'cvit-mkb-clsr' where each prediction is a vector (of float32). references: list of ground truth labels corresponding to the predictions (as int64), except for 'cvit-mkb-clsr' where each reference is a vector (of float32). Returns: depending on the IndicGLUE subset, one or several of: \"accuracy\": Accuracy \"f1\": F1 score \"precision\": Precision@10 Examples: >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"] >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0} >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0, 'f1': 1.0} >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr') >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'precision@10': 1.0} """ def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" return float((preds == labels).mean() ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = simple_accuracy(lowercase , lowercase ) SCREAMING_SNAKE_CASE : Union[str, Any] = float(fa_score(y_true=lowercase , y_pred=lowercase ) ) return { "accuracy": acc, "f1": fa, } def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : str = np.array(lowercase ) SCREAMING_SNAKE_CASE : Optional[Any] = np.array(lowercase ) SCREAMING_SNAKE_CASE : Optional[Any] = en_sentvecs.shape[0] # mean centering SCREAMING_SNAKE_CASE : str = en_sentvecs - np.mean(lowercase , axis=0 ) SCREAMING_SNAKE_CASE : Any = in_sentvecs - np.mean(lowercase , axis=0 ) SCREAMING_SNAKE_CASE : Dict = cdist(lowercase , lowercase , "cosine" ) SCREAMING_SNAKE_CASE : Tuple = np.array(range(lowercase ) ) SCREAMING_SNAKE_CASE : List[str] = sim.argsort(axis=1 )[:, :10] SCREAMING_SNAKE_CASE : Optional[Any] = np.any(preds == actual[:, None] , axis=1 ) return float(matches.mean() ) @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE ( datasets.Metric ): '''simple docstring''' def _A ( self : Tuple ): if self.config_name not in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", 
"bbca", "cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", "wiki-ner", ]: raise KeyError( "You should supply a configuration name selected in " "[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", " "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", " "\"wiki-ner\"]" ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" ) if self.config_name != "cvit-mkb-clsr" else datasets.Sequence(datasets.Value("float32" ) ), "references": datasets.Value("int64" ) if self.config_name != "cvit-mkb-clsr" else datasets.Sequence(datasets.Value("float32" ) ), } ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , ) def _A ( self : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any ): if self.config_name == "cvit-mkb-clsr": return {"precision@10": precision_at_aa(UpperCAmelCase_ , UpperCAmelCase_ )} elif self.config_name in ["wiki-ner"]: return acc_and_fa(UpperCAmelCase_ , UpperCAmelCase_ ) elif self.config_name in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md", ]: return {"accuracy": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )} else: raise KeyError( "You should supply a configuration name selected in " "[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", " "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", " "\"wiki-ner\"]" )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""
Project Euler Problem 135: https://projecteuler.net/problem=135

Given that x, y, and z are positive integers and consecutive terms of an
arithmetic progression, find the number of values of n below one million for
which the equation x^2 - y^2 - z^2 = n has exactly ten distinct solutions.
"""


def solution(limit: int = 1_000_000) -> int:
    """Return the number of n below the limit with exactly ten solutions."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 means a > d, and n > 0 means 4d > a

    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
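# Why the sieve in solution() works: write the three progression terms as
# x = a + d, y = a, z = a - d with a = first_term.  Then
#     x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = a * (4*d - a) = n,
# so a must divide n and 4*d = a + n // a, which is exactly
# `common_difference` before the division by 4.  z > 0 forces a > d, and
# n > 0 forces a < 4*d -- the two range checks in the inner loop.  The helper
# below is an independent cross-check of a single n under the same reasoning
# (the problem statement gives n = 1155 as the least value with exactly ten
# solutions).
def count_solutions_direct(n: int) -> int:
    count = 0
    for a in range(1, n + 1):
        if n % a:
            continue
        four_d = a + n // a
        if four_d % 4:
            continue
        if four_d // 4 < a:  # z > 0; a < 4*d holds automatically since n > 0
            count += 1
    return count


assert count_solutions_direct(1155) == 10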
"""
An OR gate is a logic gate in boolean algebra which results in 0 (False) if
both inputs are 0, and 1 (True) otherwise.
"""


def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Test the or_gate function over the full truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() snake_case = logging.get_logger(__name__) snake_case = [ ("""bert.bert""", """visual_bert"""), ("""bert.cls""", """cls"""), ("""bert.classifier""", """cls"""), ("""token_type_embeddings_visual""", """visual_token_type_embeddings"""), ("""position_embeddings_visual""", """visual_position_embeddings"""), ("""projection""", """visual_projection"""), ] snake_case = [ """nlvr2_coco_pre_trained.th""", """nlvr2_fine_tuned.th""", """nlvr2_pre_trained.th""", """vcr_coco_pre_train.th""", """vcr_fine_tune.th""", """vcr_pre_train.th""", """vqa_coco_pre_trained.th""", """vqa_fine_tuned.th""", """vqa_pre_trained.th""", ] def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : int = torch.load(lowercase , map_location="cpu" ) return sd def lowerCamelCase__ ( lowercase , lowercase , lowercase=rename_keys_prefix ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = OrderedDict() SCREAMING_SNAKE_CASE : Union[str, Any] = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue SCREAMING_SNAKE_CASE : Optional[Any] = key for name_pair in rename_keys_prefix: SCREAMING_SNAKE_CASE : Tuple = new_key.replace(name_pair[0] , name_pair[1] ) SCREAMING_SNAKE_CASE : Union[str, Any] = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately SCREAMING_SNAKE_CASE : Union[str, Any] = new_d["cls.predictions.bias"] return new_d @torch.no_grad() def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" assert ( checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.''' # Get Config if "pre" in checkpoint_path: SCREAMING_SNAKE_CASE : str = "pretraining" if "vcr" in checkpoint_path: SCREAMING_SNAKE_CASE : str = {"visual_embedding_dim": 512} elif "vqa_advanced" in checkpoint_path: SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 2048} elif "vqa" in checkpoint_path: SCREAMING_SNAKE_CASE : Optional[int] = {"visual_embedding_dim": 2048} elif "nlvr" in checkpoint_path: SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 1024} else: raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' ) else: if "vcr" in checkpoint_path: SCREAMING_SNAKE_CASE : Optional[Any] = {"visual_embedding_dim": 512} SCREAMING_SNAKE_CASE : Union[str, Any] = "multichoice" elif "vqa_advanced" in checkpoint_path: SCREAMING_SNAKE_CASE : int = {"visual_embedding_dim": 2048} SCREAMING_SNAKE_CASE : Any = "vqa_advanced" elif "vqa" in checkpoint_path: SCREAMING_SNAKE_CASE : Any = {"visual_embedding_dim": 2048, "num_labels": 3129} SCREAMING_SNAKE_CASE : Tuple = "vqa" elif "nlvr" in checkpoint_path: SCREAMING_SNAKE_CASE : int = { "visual_embedding_dim": 1024, "num_labels": 2, } SCREAMING_SNAKE_CASE : Union[str, Any] = "nlvr" SCREAMING_SNAKE_CASE : List[Any] = VisualBertConfig(**lowercase ) # Load State Dict SCREAMING_SNAKE_CASE : Union[str, Any] = load_state_dict(lowercase ) SCREAMING_SNAKE_CASE : Union[str, Any] = get_new_dict(lowercase , lowercase ) if model_type == "pretraining": 
SCREAMING_SNAKE_CASE : Union[str, Any] = VisualBertForPreTraining(lowercase ) elif model_type == "vqa": SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForQuestionAnswering(lowercase ) elif model_type == "nlvr": SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForVisualReasoning(lowercase ) elif model_type == "multichoice": SCREAMING_SNAKE_CASE : List[Any] = VisualBertForMultipleChoice(lowercase ) model.load_state_dict(lowercase ) # Save Checkpoints Path(lowercase ).mkdir(exist_ok=lowercase ) model.save_pretrained(lowercase ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""") parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""") snake_case = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
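# Example invocation of the conversion script above; the checkpoint file name
# must be one of ACCEPTABLE_CHECKPOINTS, the output directory is a
# hypothetical path, and the script file name is an assumption:
#
#     python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#         nlvr2_fine_tuned.th ./visualbert-nlvr2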
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """
        Merge two sets together using the union-by-rank heuristic.
        Return True if the merge happened, False if they were already joined.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the parent of a given set, compressing the path as we go."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
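# A short walk-through of the DisjointSet above: four singleton sets are
# merged step by step while `max_set` tracks the largest set size seen.
ds = DisjointSet([1, 1, 1, 1])
ds.merge(1, 3)
assert ds.get_parent(1) == ds.get_parent(3)
assert ds.max_set == 2
ds.merge(0, 2)
ds.merge(0, 1)  # joins the {0, 2} and {1, 3} sets
assert ds.max_set == 4
assert ds.merge(2, 3) is False  # already in the same set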
import warnings from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401 warnings.warn( """The `inpainting.py` script is outdated. Please use directly `from diffusers import""" """ StableDiffusionInpaintPipeline` instead.""" )
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    """
    Configuration class for a timm backbone wrapped as a transformers backbone model.
    """

    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
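# Minimal sketch constructing the config above; "resnet50" is an illustrative
# timm architecture name, not something this module mandates.
config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
print(config.backbone, config.num_channels, config.out_indices)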
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated snake_case = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""]) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ snake_case = """https://storage.googleapis.com/cvdf-datasets/mnist/""" def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = numpy.dtype(numpy.uintaa ).newbyteorder(">" ) return numpy.frombuffer(bytestream.read(4 ) , dtype=lowercase )[0] @deprecated(lowercase , "Please use tf.data to implement this functionality." ) def lowerCamelCase__ ( lowercase ): """simple docstring""" print("Extracting" , f.name ) with gzip.GzipFile(fileobj=lowercase ) as bytestream: SCREAMING_SNAKE_CASE : int = _readaa(lowercase ) if magic != 2051: raise ValueError( "Invalid magic number %d in MNIST image file: %s" % (magic, f.name) ) SCREAMING_SNAKE_CASE : Tuple = _readaa(lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = _readaa(lowercase ) SCREAMING_SNAKE_CASE : Union[str, Any] = _readaa(lowercase ) SCREAMING_SNAKE_CASE : Tuple = bytestream.read(rows * cols * num_images ) SCREAMING_SNAKE_CASE : Union[str, Any] = numpy.frombuffer(lowercase , dtype=numpy.uinta ) SCREAMING_SNAKE_CASE : List[str] = data.reshape(lowercase , lowercase , lowercase , 1 ) return data @deprecated(lowercase , "Please use tf.one_hot on tensors." ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = labels_dense.shape[0] SCREAMING_SNAKE_CASE : str = numpy.arange(lowercase ) * num_classes SCREAMING_SNAKE_CASE : List[str] = numpy.zeros((num_labels, num_classes) ) SCREAMING_SNAKE_CASE : List[Any] = 1 return labels_one_hot @deprecated(lowercase , "Please use tf.data to implement this functionality." ) def lowerCamelCase__ ( lowercase , lowercase=False , lowercase=10 ): """simple docstring""" print("Extracting" , f.name ) with gzip.GzipFile(fileobj=lowercase ) as bytestream: SCREAMING_SNAKE_CASE : int = _readaa(lowercase ) if magic != 2049: raise ValueError( "Invalid magic number %d in MNIST label file: %s" % (magic, f.name) ) SCREAMING_SNAKE_CASE : Dict = _readaa(lowercase ) SCREAMING_SNAKE_CASE : Optional[Any] = bytestream.read(lowercase ) SCREAMING_SNAKE_CASE : Any = numpy.frombuffer(lowercase , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(lowercase , lowercase ) return labels class SCREAMING_SNAKE_CASE : '''simple docstring''' @deprecated( UpperCAmelCase_ , "Please use alternatives such as official/mnist/_DataSet.py" " from tensorflow/models." 
, ) def __init__( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : List[Any]=dtypes.floataa , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Any=None , ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = random_seed.get_seed(UpperCAmelCase_ ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) SCREAMING_SNAKE_CASE : Dict = dtypes.as_dtype(UpperCAmelCase_ ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype ) if fake_data: SCREAMING_SNAKE_CASE : Optional[int] = 1_0000 SCREAMING_SNAKE_CASE : Dict = one_hot else: assert ( images.shape[0] == labels.shape[0] ), f'''images.shape: {images.shape} labels.shape: {labels.shape}''' SCREAMING_SNAKE_CASE : Dict = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 SCREAMING_SNAKE_CASE : Optional[Any] = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. SCREAMING_SNAKE_CASE : List[Any] = images.astype(numpy.floataa ) SCREAMING_SNAKE_CASE : Optional[Any] = numpy.multiply(UpperCAmelCase_ , 1.0 / 255.0 ) SCREAMING_SNAKE_CASE : Tuple = images SCREAMING_SNAKE_CASE : Dict = labels SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : int = 0 @property def _A ( self : int ): return self._images @property def _A ( self : str ): return self._labels @property def _A ( self : Any ): return self._num_examples @property def _A ( self : Any ): return self._epochs_completed def _A ( self : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Union[str, Any]=True ): if fake_data: SCREAMING_SNAKE_CASE : Union[str, Any] = [1] * 784 SCREAMING_SNAKE_CASE : Optional[Any] = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(UpperCAmelCase_ )], [fake_label for _ in range(UpperCAmelCase_ )], ) SCREAMING_SNAKE_CASE : str = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: SCREAMING_SNAKE_CASE : Union[str, Any] = numpy.arange(self._num_examples ) numpy.random.shuffle(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = self.images[perma] SCREAMING_SNAKE_CASE : Optional[int] = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch SCREAMING_SNAKE_CASE : List[Any] = self._num_examples - start SCREAMING_SNAKE_CASE : Union[str, Any] = self._images[start : self._num_examples] SCREAMING_SNAKE_CASE : str = self._labels[start : self._num_examples] # Shuffle the data if shuffle: SCREAMING_SNAKE_CASE : Dict = numpy.arange(self._num_examples ) numpy.random.shuffle(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Dict = self.images[perm] SCREAMING_SNAKE_CASE : Union[str, Any] = self.labels[perm] # Start next epoch SCREAMING_SNAKE_CASE : Optional[Any] = 0 SCREAMING_SNAKE_CASE : Dict = batch_size - rest_num_examples SCREAMING_SNAKE_CASE : Tuple = self._index_in_epoch SCREAMING_SNAKE_CASE : Any = self._images[start:end] SCREAMING_SNAKE_CASE : Tuple = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), 
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size SCREAMING_SNAKE_CASE : Union[str, Any] = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(lowercase , "Please write your own downloading logic." ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" if not gfile.Exists(lowercase ): gfile.MakeDirs(lowercase ) SCREAMING_SNAKE_CASE : str = os.path.join(lowercase , lowercase ) if not gfile.Exists(lowercase ): urllib.request.urlretrieve(lowercase , lowercase ) # noqa: S310 with gfile.GFile(lowercase ) as f: SCREAMING_SNAKE_CASE : List[Any] = f.size() print("Successfully downloaded" , lowercase , lowercase , "bytes." ) return filepath @deprecated( lowercase , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" ) def lowerCamelCase__ ( lowercase , lowercase=False , lowercase=False , lowercase=dtypes.floataa , lowercase=True , lowercase=5000 , lowercase=None , lowercase=DEFAULT_SOURCE_URL , ): """simple docstring""" if fake_data: def fake(): return _DataSet( [] , [] , fake_data=lowercase , one_hot=lowercase , dtype=lowercase , seed=lowercase ) SCREAMING_SNAKE_CASE : Tuple = fake() SCREAMING_SNAKE_CASE : str = fake() SCREAMING_SNAKE_CASE : Any = fake() return _Datasets(train=lowercase , validation=lowercase , test=lowercase ) if not source_url: # empty string check SCREAMING_SNAKE_CASE : Optional[Any] = DEFAULT_SOURCE_URL SCREAMING_SNAKE_CASE : Optional[int] = "train-images-idx3-ubyte.gz" SCREAMING_SNAKE_CASE : List[Any] = "train-labels-idx1-ubyte.gz" SCREAMING_SNAKE_CASE : Optional[int] = "t10k-images-idx3-ubyte.gz" SCREAMING_SNAKE_CASE : Optional[Any] = "t10k-labels-idx1-ubyte.gz" SCREAMING_SNAKE_CASE : Any = _maybe_download( lowercase , lowercase , source_url + train_images_file ) with gfile.Open(lowercase , "rb" ) as f: SCREAMING_SNAKE_CASE : List[Any] = _extract_images(lowercase ) SCREAMING_SNAKE_CASE : Optional[Any] = _maybe_download( lowercase , lowercase , source_url + train_labels_file ) with gfile.Open(lowercase , "rb" ) as f: SCREAMING_SNAKE_CASE : List[str] = _extract_labels(lowercase , one_hot=lowercase ) SCREAMING_SNAKE_CASE : Tuple = _maybe_download( lowercase , lowercase , source_url + test_images_file ) with gfile.Open(lowercase , "rb" ) as f: SCREAMING_SNAKE_CASE : Tuple = _extract_images(lowercase ) SCREAMING_SNAKE_CASE : Union[str, Any] = _maybe_download( lowercase , lowercase , source_url + test_labels_file ) with gfile.Open(lowercase , "rb" ) as f: SCREAMING_SNAKE_CASE : str = _extract_labels(lowercase , one_hot=lowercase ) if not 0 <= validation_size <= len(lowercase ): SCREAMING_SNAKE_CASE : Union[str, Any] = ( "Validation size should be between 0 and " F'''{len(lowercase )}. Received: {validation_size}.''' ) raise ValueError(lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = train_images[:validation_size] SCREAMING_SNAKE_CASE : Optional[Any] = train_labels[:validation_size] SCREAMING_SNAKE_CASE : Optional[Any] = train_images[validation_size:] SCREAMING_SNAKE_CASE : Optional[int] = train_labels[validation_size:] SCREAMING_SNAKE_CASE : List[Any] = {"dtype": dtype, "reshape": reshape, "seed": seed} SCREAMING_SNAKE_CASE : Any = _DataSet(lowercase , lowercase , **lowercase ) SCREAMING_SNAKE_CASE : str = _DataSet(lowercase , lowercase , **lowercase ) SCREAMING_SNAKE_CASE : Tuple = _DataSet(lowercase , lowercase , **lowercase ) return _Datasets(train=lowercase , validation=lowercase , test=lowercase )
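# Usage sketch for the deprecated loader above, assuming it is exported under
# its historical tf.contrib name `read_data_sets` and that the download
# directory is writable; network access is required on first use.
#
#     datasets = read_data_sets("/tmp/mnist", one_hot=True, validation_size=5000)
#     images, labels = datasets.train.next_batch(32)
#     print(images.shape, labels.shape)  # (32, 784) (32, 10)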
"""
Project Euler Problem 21: evaluate the sum of all the amicable numbers under 10000.
"""
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n (divisors excluding n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10_000) -> int:
    """Sum every i below the limit that forms an amicable pair with another number."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import Callable, Dict, List, Tuple import timm import torch import torch.nn as nn from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf from huggingface_hub import cached_download, hf_hub_url from torch import Tensor from vissl.models.model_helpers import get_trunk_forward_outputs from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel from transformers.utils import logging logging.set_verbosity_info() snake_case = logging.get_logger() @dataclass class SCREAMING_SNAKE_CASE : '''simple docstring''' UpperCamelCase_ : nn.Module UpperCamelCase_ : List[nn.Module] = field(default_factory=lowerCAmelCase ) UpperCamelCase_ : list = field(default_factory=lowerCAmelCase ) def _A ( self : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tensor , UpperCAmelCase_ : Tensor ): SCREAMING_SNAKE_CASE : Optional[Any] = len(list(m.modules() ) ) == 1 or isinstance(UpperCAmelCase_ , nn.Convad ) or isinstance(UpperCAmelCase_ , nn.BatchNormad ) if has_not_submodules: self.traced.append(UpperCAmelCase_ ) def __call__( self : Union[str, Any] , UpperCAmelCase_ : Tensor ): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(UpperCAmelCase_ ) [x.remove() for x in self.handles] return self @property def _A ( self : int ): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda UpperCAmelCase_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class SCREAMING_SNAKE_CASE : '''simple docstring''' UpperCamelCase_ : nn.Module UpperCamelCase_ : nn.Module UpperCamelCase_ : int = 1 UpperCamelCase_ : List = field(default_factory=lowerCAmelCase ) UpperCamelCase_ : List = field(default_factory=lowerCAmelCase ) UpperCamelCase_ : bool = True def __call__( self : Dict , UpperCAmelCase_ : Tensor ): SCREAMING_SNAKE_CASE : List[Any] = Tracker(self.dest )(UpperCAmelCase_ ).parametrized SCREAMING_SNAKE_CASE : Optional[Any] = Tracker(self.src )(UpperCAmelCase_ ).parametrized SCREAMING_SNAKE_CASE : Optional[int] = list(filter(lambda UpperCAmelCase_ : type(UpperCAmelCase_ ) not in self.src_skip , UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE : Dict = list(filter(lambda UpperCAmelCase_ : type(UpperCAmelCase_ ) not in self.dest_skip , UpperCAmelCase_ ) ) if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ) and self.raise_if_mismatch: raise Exception( f'''Numbers of operations are different. 
Source module has {len(UpperCAmelCase_ )} operations while''' f''' destination module has {len(UpperCAmelCase_ )}.''' ) for dest_m, src_m in zip(UpperCAmelCase_ , UpperCAmelCase_ ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(f'''Transfered from={src_m} to={dest_m}''' ) class SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' def __init__( self : str , UpperCAmelCase_ : nn.Module ): super().__init__() SCREAMING_SNAKE_CASE : List[Tuple[str, nn.Module]] = [] # - get the stem feature_blocks.append(("conv1", model.stem) ) # - get all the feature blocks for k, v in model.trunk_output.named_children(): assert k.startswith("block" ), f'''Unexpected layer name {k}''' SCREAMING_SNAKE_CASE : str = len(UpperCAmelCase_ ) + 1 feature_blocks.append((f'''res{block_index}''', v) ) SCREAMING_SNAKE_CASE : str = nn.ModuleDict(UpperCAmelCase_ ) def _A ( self : int , UpperCAmelCase_ : Tensor ): return get_trunk_forward_outputs( UpperCAmelCase_ , out_feat_keys=UpperCAmelCase_ , feature_blocks=self._feature_blocks , ) class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def _A ( self : Dict , UpperCAmelCase_ : str ): SCREAMING_SNAKE_CASE : List[str] = x.split("-" ) return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] ) def __getitem__( self : List[str] , UpperCAmelCase_ : str ): # default to timm! if x not in self: SCREAMING_SNAKE_CASE : Dict = self.convert_name_to_timm(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = partial(lambda: (timm.create_model(UpperCAmelCase_ , pretrained=UpperCAmelCase_ ).eval(), None) ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = super().__getitem__(UpperCAmelCase_ ) return val class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __getitem__( self : Tuple , UpperCAmelCase_ : str ): if "seer" in x and "in1k" not in x: SCREAMING_SNAKE_CASE : Tuple = RegNetModel else: SCREAMING_SNAKE_CASE : List[str] = RegNetForImageClassification return val def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" for from_key, to_key in keys: SCREAMING_SNAKE_CASE : int = from_state_dict[from_key].clone() print(F'''Copied key={from_key} to={to_key}''' ) return to_state_dict def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = True , ): """simple docstring""" print(F'''Converting {name}...''' ) with torch.no_grad(): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = from_model_func() SCREAMING_SNAKE_CASE : Union[str, Any] = our_model_func(lowercase ).eval() SCREAMING_SNAKE_CASE : Tuple = ModuleTransfer(src=lowercase , dest=lowercase , raise_if_mismatch=lowercase ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn((1, 3, 224, 224) ) module_transfer(lowercase ) if from_state_dict is not None: SCREAMING_SNAKE_CASE : int = [] # for seer - in1k finetuned we have to manually copy the head if "seer" in name and "in1k" in name: SCREAMING_SNAKE_CASE : List[Any] = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")] SCREAMING_SNAKE_CASE : Optional[int] = manually_copy_vissl_head(lowercase , our_model.state_dict() , lowercase ) our_model.load_state_dict(lowercase ) SCREAMING_SNAKE_CASE : Tuple = our_model(lowercase , output_hidden_states=lowercase ) SCREAMING_SNAKE_CASE : Tuple = ( our_outputs.logits if isinstance(lowercase , lowercase ) else our_outputs.last_hidden_state ) SCREAMING_SNAKE_CASE : Tuple = from_model(lowercase ) SCREAMING_SNAKE_CASE : Any = from_output[-1] if type(lowercase ) is list else from_output # now 
since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state if "seer" in name and "in1k" in name: SCREAMING_SNAKE_CASE : List[Any] = our_outputs.hidden_states[-1] assert torch.allclose(lowercase , lowercase ), "The model logits don't match the original one." if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / name , commit_message="Add model" , use_temp_dir=lowercase , ) SCREAMING_SNAKE_CASE : Any = 224 if "seer" not in name else 384 # we can use the convnext one SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" , size=lowercase ) image_processor.push_to_hub( repo_path_or_name=save_directory / name , commit_message="Add image processor" , use_temp_dir=lowercase , ) print(F'''Pushed {name}''' ) def lowerCamelCase__ ( lowercase , lowercase = None , lowercase = True ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = "imagenet-1k-id2label.json" SCREAMING_SNAKE_CASE : Dict = 1000 SCREAMING_SNAKE_CASE : Optional[int] = (1, num_labels) SCREAMING_SNAKE_CASE : Dict = "huggingface/label-files" SCREAMING_SNAKE_CASE : List[Any] = num_labels SCREAMING_SNAKE_CASE : Tuple = json.load(open(cached_download(hf_hub_url(lowercase , lowercase , repo_type="dataset" ) ) , "r" ) ) SCREAMING_SNAKE_CASE : List[Any] = {int(lowercase ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : List[Any] = idalabel SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : List[str] = partial(lowercase , num_labels=lowercase , idalabel=lowercase , labelaid=lowercase ) SCREAMING_SNAKE_CASE : Union[str, Any] = { "regnet-x-002": ImageNetPreTrainedConfig( depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ), "regnet-x-004": ImageNetPreTrainedConfig( depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="x" ), "regnet-x-006": ImageNetPreTrainedConfig( depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ), "regnet-x-008": ImageNetPreTrainedConfig( depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ), "regnet-x-016": ImageNetPreTrainedConfig( depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ), "regnet-x-032": ImageNetPreTrainedConfig( depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="x" ), "regnet-x-040": ImageNetPreTrainedConfig( depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="x" ), "regnet-x-064": ImageNetPreTrainedConfig( depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="x" ), "regnet-x-080": ImageNetPreTrainedConfig( depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="x" ), "regnet-x-120": ImageNetPreTrainedConfig( depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="x" ), "regnet-x-160": ImageNetPreTrainedConfig( depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="x" ), "regnet-x-320": ImageNetPreTrainedConfig( depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="x" ), # y variant "regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ), "regnet-y-004": ImageNetPreTrainedConfig( depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , 
groups_width=8 ), "regnet-y-006": ImageNetPreTrainedConfig( depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ), "regnet-y-008": ImageNetPreTrainedConfig( depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ), "regnet-y-016": ImageNetPreTrainedConfig( depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ), "regnet-y-032": ImageNetPreTrainedConfig( depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ), "regnet-y-040": ImageNetPreTrainedConfig( depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ), "regnet-y-064": ImageNetPreTrainedConfig( depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ), "regnet-y-080": ImageNetPreTrainedConfig( depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ), "regnet-y-120": ImageNetPreTrainedConfig( depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ), "regnet-y-160": ImageNetPreTrainedConfig( depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ), "regnet-y-320": ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ), # models created by SEER -> https://arxiv.org/abs/2202.08360 "regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ), "regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ), "regnet-y-1280-seer": RegNetConfig( depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ), "regnet-y-2560-seer": RegNetConfig( depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ), "regnet-y-10b-seer": ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ), # finetuned on imagenet "regnet-y-320-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ), "regnet-y-640-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ), "regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ), "regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig( depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ), "regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ), } SCREAMING_SNAKE_CASE : Any = NameToOurModelFuncMap() SCREAMING_SNAKE_CASE : Tuple = NameToFromModelFuncMap() # add seer weights logic def load_using_classy_vision(lowercase , lowercase ) -> Tuple[nn.Module, Dict]: SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(lowercase , model_dir=str(lowercase ) , map_location="cpu" ) SCREAMING_SNAKE_CASE : Optional[int] = model_func() # check if we have a head, if yes add it SCREAMING_SNAKE_CASE : str = files["classy_state_dict"]["base_model"]["model"] SCREAMING_SNAKE_CASE : str = model_state_dict["trunk"] model.load_state_dict(lowercase ) return model.eval(), model_state_dict["heads"] # pretrained SCREAMING_SNAKE_CASE : Dict = partial( lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) SCREAMING_SNAKE_CASE : Dict = partial( lowercase , 
"https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) SCREAMING_SNAKE_CASE : Dict = partial( lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) SCREAMING_SNAKE_CASE : Any = partial( lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" , lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=620.83 , w_m=2.52 ) ) ) , ) # IN1K finetuned SCREAMING_SNAKE_CASE : Optional[Any] = partial( lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) SCREAMING_SNAKE_CASE : List[str] = partial( lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) SCREAMING_SNAKE_CASE : int = partial( lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) SCREAMING_SNAKE_CASE : str = partial( lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" , lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=620.83 , w_m=2.52 ) ) ) , ) if model_name: convert_weight_and_push( lowercase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , lowercase , lowercase , ) else: for model_name, config in names_to_config.items(): convert_weight_and_push( lowercase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , lowercase , lowercase , lowercase , ) return config, expected_shape if __name__ == "__main__": snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported regnet* architecture,""" """ currently: regnetx-*, regnety-*. If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) snake_case = parser.parse_args() snake_case = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: snake_case = None snake_case = logging.get_logger(__name__) snake_case = """▁""" snake_case = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} snake_case = { """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}, """tokenizer_file""": { """google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json""" }, } snake_case = { """google/pegasus-xsum""": 512, } class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES UpperCamelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int = PegasusTokenizer UpperCamelCase_ : str = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Optional[int]="<pad>" , UpperCAmelCase_ : int="</s>" , UpperCAmelCase_ : str="<unk>" , UpperCAmelCase_ : str="<mask_2>" , UpperCAmelCase_ : Optional[int]="<mask_1>" , UpperCAmelCase_ : int=None , UpperCAmelCase_ : str=103 , **UpperCAmelCase_ : Optional[int] , ): SCREAMING_SNAKE_CASE : Optional[Any] = offset if additional_special_tokens is not None: if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): raise TypeError( f'''additional_special_tokens should be of type {type(UpperCAmelCase_ )}, but is''' f''' {type(UpperCAmelCase_ )}''' ) SCREAMING_SNAKE_CASE : Optional[Any] = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'''<unk_{i}>''' for i in range(len(UpperCAmelCase_ ) , self.offset - 1 ) ] if len(set(UpperCAmelCase_ ) ) != len(UpperCAmelCase_ ): raise ValueError( "Please make sure that the provided additional_special_tokens do not contain an incorrectly" f''' shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.''' ) SCREAMING_SNAKE_CASE : int = additional_special_tokens_extended else: SCREAMING_SNAKE_CASE : Tuple = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )] super().__init__( UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , mask_token_sent=UpperCAmelCase_ , offset=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : str = vocab_file SCREAMING_SNAKE_CASE : str = False if not self.vocab_file else True def _A ( self : Optional[Any] , UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : Optional[int] = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( "There should be 3 special tokens: mask_token, pad_token, and eos_token +" f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' ) return [1 if x in all_special_ids else 0 for x in seq] def _A ( self : int , UpperCAmelCase_ : List , UpperCAmelCase_ : Optional[List] = None , UpperCAmelCase_ : bool = False ): if already_has_special_tokens: return self._special_token_mask(UpperCAmelCase_ ) elif token_ids_a is None: return self._special_token_mask(UpperCAmelCase_ ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def _A ( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any=None ): if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _A ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(UpperCAmelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE : List[str] = os.path.join( UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ): copyfile(self.vocab_file , UpperCAmelCase_ ) return (out_vocab_file,)
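# Usage sketch for the fast Pegasus tokenizer above, assuming it is the class
# transformers exports as `PegasusTokenizerFast` and that the
# `google/pegasus-xsum` checkpoint is reachable on the Hub.
from transformers import PegasusTokenizerFast

pegasus_tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
batch = pegasus_tokenizer(["PEGASUS was pre-trained with gap sentences."], return_tensors="pt")
print(batch["input_ids"])  # each sequence ends with the </s> eos token id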
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional

import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset

from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser


logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
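# Querying the saved index (a minimal sketch: the paths are the defaults produced by
# main() above, and the DPR question-encoder checkpoint is an assumption, not part of
# this script):
#
#   from datasets import load_from_disk
#   from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast
#
#   dataset = load_from_disk("test_run/dummy-kb/my_knowledge_dataset")
#   dataset.load_faiss_index("embeddings", "test_run/dummy-kb/my_knowledge_dataset_hnsw_index.faiss")
#   q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   question_emb = q_encoder(**q_tokenizer("What does Moses' rod turn into ?", return_tensors="pt"))[0][0].numpy()
#   scores, passages = dataset.get_nearest_examples("embeddings", question_emb, k=5)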
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
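# With the lazy structure above, nothing heavy is imported at package import time; a
# submodule is only loaded on first attribute access (a sketch, assuming transformers
# is installed with torch):
#
#   from transformers import SpeechEncoderDecoderModel  # resolves through _LazyModule
#   model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
#       "facebook/wav2vec2-base-960h", "bert-base-uncased"
#   )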
import builtins
import sys

from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP


in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass


@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
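# Usage sketch (run in a real terminal; the menu drives the cursor, so it will not
# render inside most notebook frontends):
#
#   menu = BulletMenu("Which framework do you want to use?", ["pytorch", "tensorflow", "jax"])
#   chosen_index = menu.run(default_choice=0)  # returns the index of the selected entry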
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
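# Launch sketch (assuming this script is saved as gradient_accumulation.py; the flag
# values are illustrative, the flags themselves are the ones defined in main() above):
#
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2 --mixed_precision fp16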
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
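# Quick check of the ideal gas law PV = nRT (values chosen for illustration):
print(pressure_of_gas_system(2, 100, 5))  # 332.57848  (P = nRT / V)
print(volume_of_gas_system(0.5, 273, 0.004))  # ~283731.02  (V = nRT / P)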
import functools


def mincost_tickets(days: list, costs: list) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
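# LeetCode 983-style instance: with travel days [1, 4, 6, 7, 8, 20] and pass costs
# [2, 7, 15] (1-day, 7-day, 30-day), the optimum is 11: a 1-day pass on day 1, a
# 7-day pass covering days 4-8, and a 1-day pass on day 20.
print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11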
from bisect import bisect
from itertools import accumulate


def frac_knapsack(vl, wt, w, n):
    # Sort items by value density and take whole items greedily; the item that
    # overflows the capacity is taken fractionally.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
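# Classic instance: values [60, 100, 120], weights [10, 20, 30], capacity 50. The two
# densest items fit whole and two thirds of the last item top up the knapsack: 240.0.
print(frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3))  # 240.0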
def perfect_cube(n: int) -> bool:
    # Round the real cube root before cubing back; comparing the raw float
    # directly fails for exact cubes (27 ** (1 / 3) is not exactly 3.0).
    val = round(n ** (1 / 3))
    return val * val * val == n


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))  # False
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)

    waitKey(0)
import argparse
from collections import OrderedDict
from pathlib import Path

import torch

from transformers import (
    VisualBertConfig,
    VisualBertForMultipleChoice,
    VisualBertForPreTraining,
    VisualBertForQuestionAnswering,
    VisualBertForVisualReasoning,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]


def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d


@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)

    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")

    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
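# Invocation sketch (assuming the module is saved under the file name below; the input
# checkpoint name must be one of ACCEPTABLE_CHECKPOINTS, the output path is arbitrary):
#
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       vqa_pre_trained.th ./visual_bert_vqa_pretrained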
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs

    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4


@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    r"""
    Constructs a CLAP processor which wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor.
    """

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
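# Usage sketch (the checkpoint name is an assumption; any CLAP checkpoint that ships a
# feature extractor and a RoBERTa tokenizer works the same way):
#
#   from transformers import ClapProcessor
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=[waveform], sampling_rate=48_000, return_tensors="pt")
#   # -> BatchEncoding with input_ids, attention_mask and input_features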
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    # A Krishnamurthy number equals the sum of the factorials of its digits.
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("Program to check whether a number is a Krishnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.")
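# 145 is the canonical example: 1! + 4! + 5! = 1 + 24 + 120 = 145.
assert krishnamurthy(145)
assert not krishnamurthy(146)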
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
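# Minimal round trip outside of pytest (a sketch; the toy dataset and path are made up):
#
#   from datasets import Dataset
#   from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter
#
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#   ParquetDatasetWriter(ds, "toy.parquet").write()
#   reloaded = ParquetDatasetReader("toy.parquet").read()
#   assert reloaded.column_names == ["col_1", "col_2"]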
# flake8: noqa
# Lint as: python3

from typing import Dict, List, Optional, Type

from .. import config
from ..utils import logging
from .formatting import (
    ArrowFormatter,
    CustomFormatter,
    Formatter,
    PandasFormatter,
    PythonFormatter,
    TensorFormatter,
    format_table,
    query_table,
)
from .np_formatter import NumpyFormatter


logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
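# Alias resolution happens before the class lookup, so these two calls are equivalent
# (a sketch; both assume numpy is installed):
#
#   get_formatter("np")     # "np" -> "numpy" -> NumpyFormatter()
#   get_formatter("numpy")  # NumpyFormatter()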
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
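# Round trip through both converters: 1994 -> "MCMXCIV" -> 1994.
print(int_to_roman(1994))  # MCMXCIV
print(roman_to_int("MCMXCIV"))  # 1994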
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
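# Each recursive pass bubbles the largest remaining element into place:
print(bubble_sort([0, 5, 2, 3, 2]))  # [0, 2, 2, 3, 5]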
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger snake_case = get_logger(__name__) snake_case = r""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs (`Dict[str, Any]`, *optional*): Additional logits processor specific kwargs. Return: `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. """ class SCREAMING_SNAKE_CASE : '''simple docstring''' @add_start_docstrings(UpperCAmelCase_ ) def __call__( self : str , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray ): raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class SCREAMING_SNAKE_CASE : '''simple docstring''' @add_start_docstrings(UpperCAmelCase_ ) def __call__( self : Optional[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray ): raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' @add_start_docstrings(UpperCAmelCase_ ) def __call__( self : Optional[int] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ): for processor in self: SCREAMING_SNAKE_CASE : Optional[int] = inspect.signature(processor.__call__ ).parameters if len(UpperCAmelCase_ ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( f'''Make sure that all the required parameters: {list(function_args.keys() )} for ''' f'''{processor.__class__} are passed to the logits processor.''' ) SCREAMING_SNAKE_CASE : int = processor(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ) else: SCREAMING_SNAKE_CASE : Dict = processor(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : int , UpperCAmelCase_ : float ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not (temperature > 0): raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' ) SCREAMING_SNAKE_CASE : Optional[int] = temperature def __call__( self : List[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Dict = scores / self.temperature return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : str , UpperCAmelCase_ : float , UpperCAmelCase_ : float = -float("Inf" ) , UpperCAmelCase_ : int = 1 ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or (top_p < 0 or top_p > 1.0): raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' ) if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or (min_tokens_to_keep < 1): raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' ) 
SCREAMING_SNAKE_CASE : Optional[int] = top_p SCREAMING_SNAKE_CASE : str = filter_value SCREAMING_SNAKE_CASE : List[str] = min_tokens_to_keep def __call__( self : Dict , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = lax.top_k(UpperCAmelCase_ , scores.shape[-1] ) SCREAMING_SNAKE_CASE : str = jnp.full_like(UpperCAmelCase_ , self.filter_value ) SCREAMING_SNAKE_CASE : Optional[int] = jax.nn.softmax(UpperCAmelCase_ , axis=-1 ).cumsum(axis=-1 ) SCREAMING_SNAKE_CASE : Tuple = cumulative_probs < self.top_p # include the token that is higher than top_p as well SCREAMING_SNAKE_CASE : Optional[int] = jnp.roll(UpperCAmelCase_ , 1 ) score_mask |= score_mask.at[:, 0].set(UpperCAmelCase_ ) # min tokens to keep SCREAMING_SNAKE_CASE : Union[str, Any] = score_mask.at[:, : self.min_tokens_to_keep].set(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = jnp.where(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = jax.lax.sort_key_val(UpperCAmelCase_ , UpperCAmelCase_ )[-1] return next_scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : float = -float("Inf" ) , UpperCAmelCase_ : int = 1 ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or top_k <= 0: raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' ) SCREAMING_SNAKE_CASE : List[str] = max(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = filter_value def __call__( self : Dict , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = scores.shape SCREAMING_SNAKE_CASE : List[str] = jnp.full(batch_size * vocab_size , self.filter_value ) SCREAMING_SNAKE_CASE : List[str] = min(self.top_k , scores.shape[-1] ) # Safety check SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = lax.top_k(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = jnp.broadcast_to((jnp.arange(UpperCAmelCase_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() SCREAMING_SNAKE_CASE : List[str] = topk_scores.flatten() SCREAMING_SNAKE_CASE : List[Any] = topk_indices.flatten() + shift SCREAMING_SNAKE_CASE : Dict = next_scores_flat.at[topk_indices_flat].set(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = next_scores_flat.reshape(UpperCAmelCase_ , UpperCAmelCase_ ) return next_scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : List[str] = bos_token_id def __call__( self : Tuple , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Dict = jnp.full(scores.shape , -float("inf" ) ) SCREAMING_SNAKE_CASE : Optional[int] = 1 - jnp.bool_(cur_len - 1 ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(UpperCAmelCase_ , new_scores.at[:, self.bos_token_id].set(0 ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Optional[Any] = max_length SCREAMING_SNAKE_CASE : Tuple = eos_token_id def __call__( self : List[str] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : List[str] = jnp.full(scores.shape , -float("inf" ) ) SCREAMING_SNAKE_CASE : str = 
1 - jnp.bool_(cur_len - self.max_length + 1 ) SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where(UpperCAmelCase_ , new_scores.at[:, self.eos_token_id].set(0 ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or min_length < 0: raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' ) if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or eos_token_id < 0: raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' ) SCREAMING_SNAKE_CASE : List[str] = min_length SCREAMING_SNAKE_CASE : Tuple = eos_token_id def __call__( self : Optional[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): # create boolean flag to decide if min length penalty should be applied SCREAMING_SNAKE_CASE : Optional[int] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) SCREAMING_SNAKE_CASE : Optional[int] = jnp.where(UpperCAmelCase_ , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Optional[Any] = list(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = begin_index def __call__( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Union[str, Any] = 1 - jnp.bool_(cur_len - self.begin_index ) SCREAMING_SNAKE_CASE : List[str] = jnp.where(UpperCAmelCase_ , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase_ : list ): SCREAMING_SNAKE_CASE : List[Any] = list(UpperCAmelCase_ ) def __call__( self : Any , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Tuple = scores.at[..., self.suppress_tokens].set(-float("inf" ) ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any ): SCREAMING_SNAKE_CASE : List[Any] = dict(UpperCAmelCase_ ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. 
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: SCREAMING_SNAKE_CASE : Any = force_token_array.at[index].set(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = jnp.intaa(UpperCAmelCase_ ) def __call__( self : Tuple , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): def _force_token(UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : List[str] = scores.shape[0] SCREAMING_SNAKE_CASE : Optional[int] = self.force_token_array[generation_idx] SCREAMING_SNAKE_CASE : Tuple = jnp.ones_like(UpperCAmelCase_ , dtype=scores.dtype ) * -float("inf" ) SCREAMING_SNAKE_CASE : Dict = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) SCREAMING_SNAKE_CASE : Optional[Any] = lax.dynamic_update_slice(UpperCAmelCase_ , UpperCAmelCase_ , (0, current_token) ) return new_scores SCREAMING_SNAKE_CASE : Any = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(UpperCAmelCase_ ) , lambda: scores , ) , ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : Union[str, Any] = generate_config.eos_token_id SCREAMING_SNAKE_CASE : Tuple = generate_config.no_timestamps_token_id SCREAMING_SNAKE_CASE : List[Any] = generate_config.no_timestamps_token_id + 1 SCREAMING_SNAKE_CASE : Dict = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(UpperCAmelCase_ , "max_initial_timestamp_index" ): SCREAMING_SNAKE_CASE : List[Any] = generate_config.max_initial_timestamp_index else: SCREAMING_SNAKE_CASE : List[str] = model_config.vocab_size if self.max_initial_timestamp_index is None: SCREAMING_SNAKE_CASE : List[str] = model_config.vocab_size def __call__( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ): # suppress <|notimestamps|> which is handled by without_timestamps SCREAMING_SNAKE_CASE : int = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) ) def handle_pairs(UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] ): SCREAMING_SNAKE_CASE : Tuple = jnp.where((cur_len - self.begin_index) >= 1 , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Tuple = jnp.where((cur_len - self.begin_index) < 2 , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , UpperCAmelCase_ , UpperCAmelCase_ , ) return jnp.where( UpperCAmelCase_ , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Optional[Any] = jax.vmap(UpperCAmelCase_ )(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(cur_len == self.begin_index , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : List[str] = self.timestamp_begin + self.max_initial_timestamp_index 
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where( UpperCAmelCase_ , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , UpperCAmelCase_ , ) # if sum of probability over timestamps is above any other token, sample timestamp SCREAMING_SNAKE_CASE : List[Any] = jax.nn.log_softmax(UpperCAmelCase_ , axis=-1 ) def handle_cumulative_probs(UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ): SCREAMING_SNAKE_CASE : Union[str, Any] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : List[str] = jax.vmap(UpperCAmelCase_ )(UpperCAmelCase_ , UpperCAmelCase_ ) return scores
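# A minimal standalone sketch of the top-k filtering step implemented by one of the
# (name-obfuscated) logits warpers above, in plain JAX. The function name
# `top_k_filter` is illustrative and not part of the file above.
import jax.numpy as jnp
from jax import lax


def top_k_filter(scores, top_k, filter_value=-float("inf")):
    # Keep the `top_k` largest logits per row and push everything else to `filter_value`.
    batch_size, vocab_size = scores.shape
    topk = min(top_k, vocab_size)  # safety check, mirroring the class above
    topk_scores, topk_indices = lax.top_k(scores, topk)
    next_scores_flat = jnp.full(batch_size * vocab_size, filter_value)
    # Offset each row's top-k indices into the flattened (batch * vocab) array.
    shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
    topk_indices_flat = topk_indices.flatten() + shift
    next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores.flatten())
    return next_scores_flat.reshape(batch_size, vocab_size)


# Only the two largest logits per row survive; the rest become -inf.
print(top_k_filter(jnp.array([[0.1, 2.0, -1.0, 0.5]]), top_k=2))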
319
1
import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ...utils.generic import _is_jax, _is_numpy snake_case = logging.get_logger(__name__) snake_case = { """artists_file""": """artists.json""", """lyrics_file""": """lyrics.json""", """genres_file""": """genres.json""", } snake_case = { """artists_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""", }, """genres_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""", }, """lyrics_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""", }, } snake_case = { """jukebox""": 512, } class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Optional[int] = VOCAB_FILES_NAMES UpperCamelCase_ : Dict = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Any = PRETRAINED_LYRIC_TOKENS_SIZES UpperCamelCase_ : str = ['''input_ids''', '''attention_mask'''] def __init__( self : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any]=["v3", "v2", "v2"] , UpperCAmelCase_ : Optional[int]=512 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Optional[int]="<|endoftext|>" , **UpperCAmelCase_ : Optional[int] , ): SCREAMING_SNAKE_CASE : int = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else unk_token super().__init__( unk_token=UpperCAmelCase_ , n_genres=UpperCAmelCase_ , version=UpperCAmelCase_ , max_n_lyric_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : str = version SCREAMING_SNAKE_CASE : int = max_n_lyric_tokens SCREAMING_SNAKE_CASE : str = n_genres with open(UpperCAmelCase_ , encoding="utf-8" ) as vocab_handle: SCREAMING_SNAKE_CASE : Any = json.load(UpperCAmelCase_ ) with open(UpperCAmelCase_ , encoding="utf-8" ) as vocab_handle: SCREAMING_SNAKE_CASE : Any = json.load(UpperCAmelCase_ ) with open(UpperCAmelCase_ , encoding="utf-8" ) as vocab_handle: SCREAMING_SNAKE_CASE : Dict = json.load(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+" # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. 
if len(self.lyrics_encoder ) == 79: SCREAMING_SNAKE_CASE : Union[str, Any] = oov.replace(r"\-'" , r"\-+'" ) SCREAMING_SNAKE_CASE : str = regex.compile(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = {v: k for k, v in self.artists_encoder.items()} SCREAMING_SNAKE_CASE : Optional[int] = {v: k for k, v in self.genres_encoder.items()} SCREAMING_SNAKE_CASE : int = {v: k for k, v in self.lyrics_encoder.items()} @property def _A ( self : Tuple ): return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder ) def _A ( self : Dict ): return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder ) def _A ( self : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : str ): SCREAMING_SNAKE_CASE : Tuple = [self.artists_encoder.get(UpperCAmelCase_ , 0 ) for artist in list_artists] for genres in range(len(UpperCAmelCase_ ) ): SCREAMING_SNAKE_CASE : Union[str, Any] = [self.genres_encoder.get(UpperCAmelCase_ , 0 ) for genre in list_genres[genres]] SCREAMING_SNAKE_CASE : Any = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] )) SCREAMING_SNAKE_CASE : int = [[self.lyrics_encoder.get(UpperCAmelCase_ , 0 ) for character in list_lyrics[0]], [], []] return artists_id, list_genres, lyric_ids def _A ( self : Optional[Any] , UpperCAmelCase_ : Tuple ): return list(UpperCAmelCase_ ) def _A ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , **UpperCAmelCase_ : Any ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.prepare_for_tokenization(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = self._tokenize(UpperCAmelCase_ ) return artist, genre, lyrics def _A ( self : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False ): for idx in range(len(self.version ) ): if self.version[idx] == "v3": SCREAMING_SNAKE_CASE : List[Any] = artists[idx].lower() SCREAMING_SNAKE_CASE : Union[str, Any] = [genres[idx].lower()] else: SCREAMING_SNAKE_CASE : List[str] = self._normalize(artists[idx] ) + ".v2" SCREAMING_SNAKE_CASE : Optional[int] = [ self._normalize(UpperCAmelCase_ ) + ".v2" for genre in genres[idx].split("_" ) ] # split is for the full dictionary with combined genres if self.version[0] == "v2": SCREAMING_SNAKE_CASE : Any = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+" ) SCREAMING_SNAKE_CASE : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n" SCREAMING_SNAKE_CASE : Optional[Any] = {vocab[index]: index + 1 for index in range(len(UpperCAmelCase_ ) )} SCREAMING_SNAKE_CASE : Optional[Any] = 0 SCREAMING_SNAKE_CASE : Any = len(UpperCAmelCase_ ) + 1 SCREAMING_SNAKE_CASE : Dict = self.vocab SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in self.vocab.items()} SCREAMING_SNAKE_CASE : int = "" else: SCREAMING_SNAKE_CASE : Tuple = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+" ) SCREAMING_SNAKE_CASE : int = self._run_strip_accents(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = lyrics.replace("\\" , "\n" ) SCREAMING_SNAKE_CASE : Any = self.out_of_vocab.sub("" , UpperCAmelCase_ ), [], [] return artists, genres, lyrics def _A ( self : List[Any] , UpperCAmelCase_ : Optional[Any] ): SCREAMING_SNAKE_CASE : List[str] = unicodedata.normalize("NFD" , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = [] for char in text: SCREAMING_SNAKE_CASE : Any = unicodedata.category(UpperCAmelCase_ ) if cat == 
"Mn": continue output.append(UpperCAmelCase_ ) return "".join(UpperCAmelCase_ ) def _A ( self : Optional[Any] , UpperCAmelCase_ : str ): SCREAMING_SNAKE_CASE : Any = ( [chr(UpperCAmelCase_ ) for i in range(ord("a" ) , ord("z" ) + 1 )] + [chr(UpperCAmelCase_ ) for i in range(ord("A" ) , ord("Z" ) + 1 )] + [chr(UpperCAmelCase_ ) for i in range(ord("0" ) , ord("9" ) + 1 )] + ["."] ) SCREAMING_SNAKE_CASE : List[Any] = frozenset(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = re.compile(r"_+" ) SCREAMING_SNAKE_CASE : Optional[Any] = "".join([c if c in accepted else "_" for c in text.lower()] ) SCREAMING_SNAKE_CASE : Optional[int] = pattern.sub("_" , UpperCAmelCase_ ).strip("_" ) return text def _A ( self : List[str] , UpperCAmelCase_ : List[str] ): return " ".join(UpperCAmelCase_ ) def _A ( self : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : bool = False ): # Convert to TensorType if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : List[str] = TensorType(UpperCAmelCase_ ) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed." ) import tensorflow as tf SCREAMING_SNAKE_CASE : Any = tf.constant SCREAMING_SNAKE_CASE : int = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed." ) import torch SCREAMING_SNAKE_CASE : Any = torch.tensor SCREAMING_SNAKE_CASE : List[str] = torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed." ) import jax.numpy as jnp # noqa: F811 SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array SCREAMING_SNAKE_CASE : Optional[Any] = _is_jax else: SCREAMING_SNAKE_CASE : int = np.asarray SCREAMING_SNAKE_CASE : Optional[int] = _is_numpy # Do the tensor conversion in batch try: if prepend_batch_axis: SCREAMING_SNAKE_CASE : List[str] = [inputs] if not is_tensor(UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : List[Any] = as_tensor(UpperCAmelCase_ ) except: # noqa E722 raise ValueError( "Unable to create tensor, you should probably activate truncation and/or padding " "with 'padding=True' 'truncation=True' to have batched tensors with the same length." 
) return inputs def __call__( self : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : int="" , UpperCAmelCase_ : Optional[Any]="pt" ): SCREAMING_SNAKE_CASE : Optional[int] = [0, 0, 0] SCREAMING_SNAKE_CASE : Optional[Any] = [artist] * len(self.version ) SCREAMING_SNAKE_CASE : Union[str, Any] = [genres] * len(self.version ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.tokenize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self._convert_token_to_id(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = [-INFINITY] * len(full_tokens[-1] ) SCREAMING_SNAKE_CASE : List[Any] = [ self.convert_to_tensors( [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=UpperCAmelCase_ ) for i in range(len(self.version ) ) ] return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks} ) def _A ( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ): if not os.path.isdir(UpperCAmelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE : List[Any] = os.path.join( UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"] ) with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.artists_encoder , ensure_ascii=UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE : Tuple = os.path.join( UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"] ) with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.genres_encoder , ensure_ascii=UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE : Any = os.path.join( UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"] ) with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.lyrics_encoder , ensure_ascii=UpperCAmelCase_ ) ) return (artists_file, genres_file, lyrics_file) def _A ( self : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict ): SCREAMING_SNAKE_CASE : Optional[Any] = self.artists_decoder.get(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = [self.genres_decoder.get(UpperCAmelCase_ ) for genre in genres_index] SCREAMING_SNAKE_CASE : int = [self.lyrics_decoder.get(UpperCAmelCase_ ) for character in lyric_index] return artist, genres, lyrics
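# A hedged usage sketch for the tokenizer above (the obfuscated class is the
# Jukebox lyrics/metadata tokenizer from transformers). The checkpoint name is an
# assumption; any checkpoint shipping artists.json / genres.json / lyrics.json works.
from transformers import JukeboxTokenizer

tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
# __call__(artist, genres, lyrics) builds one input_ids tensor per model prior.
encoding = tokenizer("Alan Jackson", "Country Rock", "old town road")
print(len(encoding["input_ids"]))  # one entry per version in the tokenizer config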
319
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys snake_case = """3""" print("""Python version:""", sys.version) print("""OS platform:""", platform.platform()) print("""OS architecture:""", platform.machine()) try: import torch print("""Torch version:""", torch.__version__) print("""Cuda available:""", torch.cuda.is_available()) print("""Cuda version:""", torch.version.cuda) print("""CuDNN version:""", torch.backends.cudnn.version()) print("""Number of GPUs available:""", torch.cuda.device_count()) except ImportError: print("""Torch version:""", None) try: import transformers print("""transformers version:""", transformers.__version__) except ImportError: print("""transformers version:""", None)
319
1
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


snake_case = logging.get_logger(__name__)


class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    '''simple docstring'''

    def __init__( self : List[str] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Tuple ):
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead." , FutureWarning , )
        super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
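# The migration implied by the deprecation warning above: instantiate the image
# processor directly. The checkpoint name is an assumption for illustration.
from transformers import FlavaImageProcessor

image_processor = FlavaImageProcessor.from_pretrained("facebook/flava-full")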
319
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    """pipelines_utils""",
    """0.22.0""",
    """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
    standard_warn=False,
    stacklevel=3,
)
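# The import path the deprecation message above asks callers to switch to:
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput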
319
1
import baseaa


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    return baseaa.baaencode(lowercase.encode("utf-8" ) )


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    return baseaa.baadecode(lowercase ).decode("utf-8" )


if __name__ == "__main__":
    snake_case = """Hello World!"""
    snake_case = baseaa_encode(test)
    print(encoded)
    snake_case = baseaa_decode(encoded)
    print(decoded)
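# The digit-mangled names above (`baseaa`, `baaencode`, `baadecode`) appear to
# correspond to Python's standard `base64` module and its Base85 helpers; that
# reading is an assumption of this sketch. A runnable version under it:
import base64


def base85_encode(text: str) -> bytes:
    # Encode UTF-8 text to Base85 bytes.
    return base64.b85encode(text.encode("utf-8"))


def base85_decode(encoded: bytes) -> str:
    # Decode Base85 bytes back to a UTF-8 string.
    return base64.b85decode(encoded).decode("utf-8")


encoded = base85_encode("Hello World!")
print(encoded)
print(base85_decode(encoded))  # Hello World!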
319
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() snake_case = logging.get_logger(__name__) snake_case = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } snake_case = { """b0""": { """hidden_dim""": 1_280, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 224, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1_280, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 240, """dropout_rate""": 0.2, """dw_padding""": [16], }, """b2""": { """hidden_dim""": 1_408, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 260, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 16], }, """b3""": { """hidden_dim""": 1_536, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 300, """dropout_rate""": 0.3, """dw_padding""": [5, 18], }, """b4""": { """hidden_dim""": 1_792, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 380, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2_048, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 456, """dropout_rate""": 0.4, """dw_padding""": [13, 27], }, """b6""": { """hidden_dim""": 2_304, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 528, """dropout_rate""": 0.5, """dw_padding""": [31], }, """b7""": { """hidden_dim""": 2_560, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 600, """dropout_rate""": 0.5, """dw_padding""": [18], }, } def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : str = EfficientNetConfig() SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["hidden_dim"] SCREAMING_SNAKE_CASE : Tuple = CONFIG_MAP[model_name]["width_coef"] SCREAMING_SNAKE_CASE : Optional[int] = CONFIG_MAP[model_name]["depth_coef"] SCREAMING_SNAKE_CASE : Union[str, Any] = CONFIG_MAP[model_name]["image_size"] SCREAMING_SNAKE_CASE : Any = CONFIG_MAP[model_name]["dropout_rate"] SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["dw_padding"] SCREAMING_SNAKE_CASE : str = "huggingface/label-files" SCREAMING_SNAKE_CASE : str = "imagenet-1k-id2label.json" SCREAMING_SNAKE_CASE : str = 1000 SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) ) SCREAMING_SNAKE_CASE : Tuple = {int(lowercase ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Union[str, Any] = idalabel SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in idalabel.items()} return config def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" SCREAMING_SNAKE_CASE : List[Any] = Image.open(requests.get(lowercase , stream=lowercase ).raw ) return im def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"] SCREAMING_SNAKE_CASE : int = 
EfficientNetImageProcessor( size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowercase , ) return preprocessor def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )] SCREAMING_SNAKE_CASE : List[str] = sorted(set(lowercase ) ) SCREAMING_SNAKE_CASE : List[str] = len(lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = {b: str(lowercase ) for b, i in zip(lowercase , range(lowercase ) )} SCREAMING_SNAKE_CASE : Dict = [] rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") ) rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") ) rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") ) rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") ) rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") ) for b in block_names: SCREAMING_SNAKE_CASE : Tuple = block_name_mapping[b] rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") ) rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") ) 
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") ) rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") ) rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") ) SCREAMING_SNAKE_CASE : int = {} for item in rename_keys: if item[0] in original_param_names: SCREAMING_SNAKE_CASE : Any = "efficientnet." + item[1] SCREAMING_SNAKE_CASE : Optional[Any] = "classifier.weight" SCREAMING_SNAKE_CASE : List[str] = "classifier.bias" return key_mapping def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" for key, value in tf_params.items(): if "normalization" in key: continue SCREAMING_SNAKE_CASE : str = key_mapping[key] if "_conv" in key and "kernel" in key: SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowercase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(np.transpose(lowercase ) ) else: SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(lowercase ) @torch.no_grad() def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = model_classes[model_name]( include_top=lowercase , weights="imagenet" , input_tensor=lowercase , input_shape=lowercase , pooling=lowercase , classes=1000 , classifier_activation="softmax" , ) SCREAMING_SNAKE_CASE : List[Any] = original_model.trainable_variables SCREAMING_SNAKE_CASE : Dict = original_model.non_trainable_variables SCREAMING_SNAKE_CASE : Dict = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: SCREAMING_SNAKE_CASE : Tuple = param.numpy() SCREAMING_SNAKE_CASE : Tuple = list(tf_params.keys() ) # Load HuggingFace model SCREAMING_SNAKE_CASE : Tuple = get_efficientnet_config(lowercase ) SCREAMING_SNAKE_CASE : str = EfficientNetForImageClassification(lowercase ).eval() SCREAMING_SNAKE_CASE : Dict = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("Converting parameters..." ) SCREAMING_SNAKE_CASE : Dict = rename_keys(lowercase ) replace_params(lowercase , lowercase , lowercase ) # Initialize preprocessor and preprocess input image SCREAMING_SNAKE_CASE : Optional[int] = convert_image_processor(lowercase ) SCREAMING_SNAKE_CASE : int = preprocessor(images=prepare_img() , return_tensors="pt" ) # HF model inference hf_model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = hf_model(**lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = outputs.logits.detach().numpy() # Original model inference SCREAMING_SNAKE_CASE : int = False SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"] SCREAMING_SNAKE_CASE : Any = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) SCREAMING_SNAKE_CASE : Tuple = image.img_to_array(lowercase ) SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(lowercase , axis=0 ) SCREAMING_SNAKE_CASE : Any = original_model.predict(lowercase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(lowercase , lowercase , atol=1E-3 ), "The predicted logits are not the same." print("Model outputs match!" 
) if save_model: # Create folder to save model if not os.path.isdir(lowercase ): os.mkdir(lowercase ) # Save converted model and image processor hf_model.save_pretrained(lowercase ) preprocessor.save_pretrained(lowercase ) if push_to_hub: # Push model and image processor to hub print(F'''Pushing converted {model_name} to the hub...''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = F'''efficientnet-{model_name}''' preprocessor.push_to_hub(lowercase ) hf_model.push_to_hub(lowercase ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") snake_case = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
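# A standalone sketch of the kernel-layout conversions used in the parameter
# replacement above: TF stores conv kernels as (H, W, C_in, C_out) while PyTorch
# expects (C_out, C_in, H, W), and TF depthwise kernels are (H, W, C, multiplier).
import numpy as np
import torch

tf_kernel = np.zeros((3, 3, 16, 32), dtype=np.float32)
pt_weight = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
assert pt_weight.shape == (32, 16, 3, 3)

tf_dw_kernel = np.zeros((3, 3, 16, 1), dtype=np.float32)
pt_dw_weight = torch.from_numpy(tf_dw_kernel).permute(2, 3, 0, 1)
assert pt_dw_weight.shape == (16, 1, 3, 3)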
319
1
import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : str = None UpperCamelCase_ : Dict = None @property def _A ( self : List[Any] ): return self.feat_extract_tester.prepare_feat_extract_dict() def _A ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(UpperCAmelCase_ , "feature_size" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , "sampling_rate" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , "padding_value" ) ) def _A ( self : List[str] ): SCREAMING_SNAKE_CASE : List[str] = self.feat_extract_tester.prepare_inputs_for_common() SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE : str = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : str = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(UpperCAmelCase_ ) == len(UpperCAmelCase_ ) for x, y in zip(UpperCAmelCase_ , processed_features[input_name] ) ) ) SCREAMING_SNAKE_CASE : List[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = BatchFeature({input_name: speech_inputs} , tensor_type="np" ) SCREAMING_SNAKE_CASE : Dict = processed_features[input_name] if len(batch_features_input.shape ) < 3: SCREAMING_SNAKE_CASE : Optional[int] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_torch def _A ( self : Optional[int] ): SCREAMING_SNAKE_CASE : str = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE : int = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : Dict = BatchFeature({input_name: speech_inputs} , tensor_type="pt" ) SCREAMING_SNAKE_CASE : Optional[int] = processed_features[input_name] if len(batch_features_input.shape ) < 3: SCREAMING_SNAKE_CASE : List[Any] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_tf def _A ( self : List[str] ): SCREAMING_SNAKE_CASE : Dict = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : List[str] = BatchFeature({input_name: speech_inputs} , tensor_type="tf" ) SCREAMING_SNAKE_CASE : Optional[Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: SCREAMING_SNAKE_CASE : Dict = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) def _A ( self : int , UpperCAmelCase_ : Dict=False ): def _inputs_have_equal_length(UpperCAmelCase_ : Union[str, Any] ): SCREAMING_SNAKE_CASE : int = len(input[0] ) for input_slice in input[1:]: if len(UpperCAmelCase_ ) != length: return False return True def _inputs_are_equal(UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] 
): if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ): return False for input_slice_a, input_slice_a in zip(UpperCAmelCase_ , UpperCAmelCase_ ): if not np.allclose(np.asarray(UpperCAmelCase_ ) , np.asarray(UpperCAmelCase_ ) , atol=1E-3 ): return False return True SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : int = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE : str = self.feat_extract_tester.seq_length_diff SCREAMING_SNAKE_CASE : Dict = self.feat_extract_tester.max_seq_length + pad_diff SCREAMING_SNAKE_CASE : Tuple = self.feat_extract_tester.min_seq_length SCREAMING_SNAKE_CASE : Optional[int] = self.feat_extract_tester.batch_size SCREAMING_SNAKE_CASE : Optional[Any] = self.feat_extract_tester.feature_size # test padding for List[int] + numpy SCREAMING_SNAKE_CASE : List[Any] = feat_extract.pad(UpperCAmelCase_ , padding=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = input_a[input_name] SCREAMING_SNAKE_CASE : Optional[int] = feat_extract.pad(UpperCAmelCase_ , padding="longest" ) SCREAMING_SNAKE_CASE : Tuple = input_a[input_name] SCREAMING_SNAKE_CASE : Union[str, Any] = feat_extract.pad(UpperCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[-1] ) ) SCREAMING_SNAKE_CASE : Optional[Any] = input_a[input_name] SCREAMING_SNAKE_CASE : Optional[int] = feat_extract.pad(UpperCAmelCase_ , padding="longest" , return_tensors="np" ) SCREAMING_SNAKE_CASE : Tuple = input_a[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(UpperCAmelCase_ ): feat_extract.pad(UpperCAmelCase_ , padding="max_length" )[input_name] SCREAMING_SNAKE_CASE : Dict = feat_extract.pad( UpperCAmelCase_ , padding="max_length" , max_length=UpperCAmelCase_ , return_tensors="np" ) SCREAMING_SNAKE_CASE : Dict = input_a[input_name] self.assertFalse(_inputs_have_equal_length(UpperCAmelCase_ ) ) self.assertTrue(_inputs_have_equal_length(UpperCAmelCase_ ) ) self.assertTrue(_inputs_have_equal_length(UpperCAmelCase_ ) ) self.assertTrue(_inputs_are_equal(UpperCAmelCase_ , UpperCAmelCase_ ) ) self.assertTrue(len(input_a[0] ) == pad_min_length ) self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff ) self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) ) self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size ) # test padding for `pad_to_multiple_of` for List[int] + numpy SCREAMING_SNAKE_CASE : int = feat_extract.pad(UpperCAmelCase_ , pad_to_multiple_of=10 ) SCREAMING_SNAKE_CASE : str = input_a[input_name] SCREAMING_SNAKE_CASE : int = feat_extract.pad(UpperCAmelCase_ , padding="longest" , pad_to_multiple_of=10 ) SCREAMING_SNAKE_CASE : Union[str, Any] = input_a[input_name] SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract.pad( UpperCAmelCase_ , padding="max_length" , pad_to_multiple_of=10 , max_length=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = input_a[input_name] SCREAMING_SNAKE_CASE : List[str] = feat_extract.pad( UpperCAmelCase_ , padding="max_length" , pad_to_multiple_of=10 , max_length=UpperCAmelCase_ , return_tensors="np" , ) SCREAMING_SNAKE_CASE : Tuple = input_a[input_name] self.assertTrue(all(len(UpperCAmelCase_ ) % 10 == 0 for x in input_a ) ) 
self.assertTrue(_inputs_are_equal(UpperCAmelCase_ , UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE : List[str] = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10 self.assertTrue(all(len(UpperCAmelCase_ ) == expected_mult_pad_length for x in input_a ) ) self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == feature_size ) # Check padding value is correct SCREAMING_SNAKE_CASE : Tuple = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 ) self.assertTrue( abs( np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) ) < 1E-3 ) self.assertTrue( abs( np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) ) < 1E-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) ) < 1E-3 ) def _A ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=False ): def _inputs_have_equal_length(UpperCAmelCase_ : Union[str, Any] ): SCREAMING_SNAKE_CASE : int = len(input[0] ) for input_slice in input[1:]: if len(UpperCAmelCase_ ) != length: return False return True def _inputs_are_equal(UpperCAmelCase_ : str , UpperCAmelCase_ : int ): if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ): return False for input_slice_a, input_slice_a in zip(UpperCAmelCase_ , UpperCAmelCase_ ): if not np.allclose(np.asarray(UpperCAmelCase_ ) , np.asarray(UpperCAmelCase_ ) , atol=1E-3 ): return False return True SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE : List[Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : Tuple = BatchFeature({input_name: speech_inputs} ) # truncate to smallest SCREAMING_SNAKE_CASE : int = feat_extract.pad( UpperCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[0] ) , truncation=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = input_a[input_name] SCREAMING_SNAKE_CASE : List[Any] = feat_extract.pad(UpperCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[0] ) ) SCREAMING_SNAKE_CASE : Tuple = input_a[input_name] self.assertTrue(_inputs_have_equal_length(UpperCAmelCase_ ) ) self.assertFalse(_inputs_have_equal_length(UpperCAmelCase_ ) ) # truncate to smallest with np SCREAMING_SNAKE_CASE : List[Any] = feat_extract.pad( UpperCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" , truncation=UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Dict = input_a[input_name] SCREAMING_SNAKE_CASE : Tuple = feat_extract.pad( UpperCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" ) SCREAMING_SNAKE_CASE : List[Any] = input_a[input_name] self.assertTrue(_inputs_have_equal_length(UpperCAmelCase_ ) ) self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list 
self.assertFalse(_inputs_have_equal_length(UpperCAmelCase_ ) ) # truncate to middle SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract.pad( UpperCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=UpperCAmelCase_ , return_tensors="np" , ) SCREAMING_SNAKE_CASE : List[str] = input_a[input_name] SCREAMING_SNAKE_CASE : Dict = feat_extract.pad( UpperCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = input_a[input_name] SCREAMING_SNAKE_CASE : str = feat_extract.pad( UpperCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[1] ) , return_tensors="np" ) SCREAMING_SNAKE_CASE : int = input_a[input_name] self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) ) self.assertTrue(_inputs_have_equal_length(UpperCAmelCase_ ) ) self.assertTrue(_inputs_have_equal_length(UpperCAmelCase_ ) ) self.assertTrue(_inputs_are_equal(UpperCAmelCase_ , UpperCAmelCase_ ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(UpperCAmelCase_ ) ) self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) ) # padding has to be max_length when setting `truncation=True` with self.assertRaises(UpperCAmelCase_ ): feat_extract.pad(UpperCAmelCase_ , truncation=UpperCAmelCase_ )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(UpperCAmelCase_ ): feat_extract.pad(UpperCAmelCase_ , padding="longest" , truncation=UpperCAmelCase_ )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(UpperCAmelCase_ ): feat_extract.pad(UpperCAmelCase_ , padding="longest" , truncation=UpperCAmelCase_ )[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(UpperCAmelCase_ ): feat_extract.pad(UpperCAmelCase_ , padding="max_length" , truncation=UpperCAmelCase_ )[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy SCREAMING_SNAKE_CASE : List[Any] = 12 SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract.pad( UpperCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=UpperCAmelCase_ , truncation=UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = input_a[input_name] SCREAMING_SNAKE_CASE : str = feat_extract.pad( UpperCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Optional[Any] = input_a[input_name] # retrieve expected_length as multiple of pad_to_multiple_of SCREAMING_SNAKE_CASE : Tuple = len(speech_inputs[0] ) if expected_length % pad_to_multiple_of != 0: SCREAMING_SNAKE_CASE : List[Any] = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_a[0] ) == expected_length ) self.assertTrue(_inputs_have_equal_length(UpperCAmelCase_ ) ) self.assertFalse(_inputs_have_equal_length(UpperCAmelCase_ ) ) def _A ( self : str ): self._check_padding(numpify=UpperCAmelCase_ ) def _A ( self : Tuple ): self._check_padding(numpify=UpperCAmelCase_ ) def _A ( self : int ): self._check_truncation(numpify=UpperCAmelCase_ ) def _A ( self : List[str] ): self._check_truncation(numpify=UpperCAmelCase_ ) @require_torch def _A ( self : Any ): SCREAMING_SNAKE_CASE : Tuple = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE : Any = 
self.feat_extract_tester.prepare_inputs_for_common() SCREAMING_SNAKE_CASE : Union[str, Any] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : List[Any] = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE : Any = feat_extract.pad(UpperCAmelCase_ , padding="longest" , return_tensors="np" )[input_name] SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract.pad(UpperCAmelCase_ , padding="longest" , return_tensors="pt" )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 ) @require_tf def _A ( self : int ): SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common() SCREAMING_SNAKE_CASE : Dict = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : Optional[Any] = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE : List[str] = feat_extract.pad(UpperCAmelCase_ , padding="longest" , return_tensors="np" )[input_name] SCREAMING_SNAKE_CASE : List[Any] = feat_extract.pad(UpperCAmelCase_ , padding="longest" , return_tensors="tf" )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 ) def _A ( self : Optional[int] ): SCREAMING_SNAKE_CASE : List[Any] = self.feat_extract_dict SCREAMING_SNAKE_CASE : Union[str, Any] = True SCREAMING_SNAKE_CASE : str = self.feature_extraction_class(**UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = self.feat_extract_tester.prepare_inputs_for_common() SCREAMING_SNAKE_CASE : int = [len(UpperCAmelCase_ ) for x in speech_inputs] SCREAMING_SNAKE_CASE : Any = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : Any = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE : int = feat_extract.pad(UpperCAmelCase_ , padding="longest" , return_tensors="np" ) self.assertIn("attention_mask" , UpperCAmelCase_ ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , UpperCAmelCase_ ) def _A ( self : str ): SCREAMING_SNAKE_CASE : List[Any] = self.feat_extract_dict SCREAMING_SNAKE_CASE : Optional[Any] = True SCREAMING_SNAKE_CASE : Tuple = self.feature_extraction_class(**UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = self.feat_extract_tester.prepare_inputs_for_common() SCREAMING_SNAKE_CASE : Dict = [len(UpperCAmelCase_ ) for x in speech_inputs] SCREAMING_SNAKE_CASE : Tuple = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : Optional[int] = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE : Any = min(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = feat_extract.pad( UpperCAmelCase_ , padding="max_length" , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="np" ) self.assertIn("attention_mask" , UpperCAmelCase_ ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
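# A hedged sketch of the pad() semantics these tests exercise, using a concrete
# feature extractor. The constructor arguments below are standard transformers
# API; treating this extractor as representative is an assumption of the sketch.
from transformers import BatchFeature, Wav2Vec2FeatureExtractor

fe = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
inputs = BatchFeature({"input_values": [[0.1] * 800, [0.2] * 1000]})
batch = fe.pad(inputs, padding="longest", return_tensors="np")
print(batch["input_values"].shape)  # (2, 1000): the shorter input is padded to the longest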
319
def lowerCamelCase__ ( ): """simple docstring""" return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] snake_case = generate_large_matrix() snake_case = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def lowerCamelCase__ ( lowercase ): """simple docstring""" assert all(row == sorted(lowercase , reverse=lowercase ) for row in grid ) assert all(list(lowercase ) == sorted(lowercase , reverse=lowercase ) for col in zip(*lowercase ) ) def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : Optional[Any] = len(lowercase ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: SCREAMING_SNAKE_CASE : List[Any] = (left + right) // 2 SCREAMING_SNAKE_CASE : Optional[int] = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: SCREAMING_SNAKE_CASE : List[Any] = mid + 1 else: SCREAMING_SNAKE_CASE : Dict = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(lowercase ) def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = 0 SCREAMING_SNAKE_CASE : List[str] = len(grid[0] ) for i in range(len(lowercase ) ): SCREAMING_SNAKE_CASE : Any = find_negative_index(grid[i][:bound] ) total += bound return (len(lowercase ) * len(grid[0] )) - total def lowerCamelCase__ ( lowercase ): """simple docstring""" return len([number for row in grid for number in row if number < 0] ) def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = 0 for row in grid: for i, number in enumerate(lowercase ): if number < 0: total += len(lowercase ) - i break return total def lowerCamelCase__ ( ): """simple docstring""" from timeit import timeit print("Running benchmarks" ) SCREAMING_SNAKE_CASE : List[str] = ( "from __main__ import count_negatives_binary_search, " "count_negatives_brute_force, count_negatives_brute_force_with_break, grid" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): SCREAMING_SNAKE_CASE : Union[str, Any] = timeit(F'''{func}(grid=grid)''' , setup=lowercase , number=500 ) print(F'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
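# Worked example for the counting strategies above. In this dump every `def` is
# renamed to `lowerCamelCase__`; the intended names below are recovered from the
# benchmark setup strings, which is an assumption of this sketch.
example = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
print(count_negatives_binary_search(example))           # 8
print(count_negatives_brute_force(example))             # 8
print(count_negatives_brute_force_with_break(example))  # 8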
319
1
# Author: OMKAR PATHAK, Nwachukwu Chidiebere # Use a Python dictionary to construct the graph. from __future__ import annotations from pprint import pformat from typing import Generic, TypeVar snake_case = TypeVar("""T""") class SCREAMING_SNAKE_CASE ( Generic[T] ): '''simple docstring''' def __init__( self : Any , UpperCAmelCase_ : bool = True ): SCREAMING_SNAKE_CASE : dict[T, list[T]] = {} # dictionary of lists SCREAMING_SNAKE_CASE : str = directed def _A ( self : List[Any] , UpperCAmelCase_ : T , UpperCAmelCase_ : T ): if not self.directed: # For undirected graphs # if both source vertex and destination vertex are both present in the # adjacency list, add destination vertex to source vertex list of adjacent # vertices and add source vertex to destination vertex list of adjacent # vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(UpperCAmelCase_ ) self.adj_list[destination_vertex].append(UpperCAmelCase_ ) # if only source vertex is present in adjacency list, add destination vertex # to source vertex list of adjacent vertices, then create a new vertex with # destination vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = [source_vertex] # if only destination vertex is present in adjacency list, add source vertex # to destination vertex list of adjacent vertices, then create a new vertex # with source vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif destination_vertex in self.adj_list: self.adj_list[destination_vertex].append(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and assign a list # containing the destination vertex as it's first adjacent vertex also # create a new vertex with destination vertex as key and assign a list # containing the source vertex as it's first adjacent vertex. else: SCREAMING_SNAKE_CASE : Optional[int] = [destination_vertex] SCREAMING_SNAKE_CASE : int = [source_vertex] else: # For directed graphs # if both source vertex and destination vertex are present in adjacency # list, add destination vertex to source vertex list of adjacent vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(UpperCAmelCase_ ) # if only source vertex is present in adjacency list, add destination # vertex to source vertex list of adjacent vertices and create a new vertex # with destination vertex as key, which has no adjacent vertex elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = [] # if only destination vertex is present in adjacency list, create a new # vertex with source vertex as key and assign a list containing destination # vertex as first adjacent vertex elif destination_vertex in self.adj_list: SCREAMING_SNAKE_CASE : Any = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and a list containing # destination vertex as it's first adjacent vertex. 
Then create a new vertex # with destination vertex as key, which has no adjacent vertex else: SCREAMING_SNAKE_CASE : Optional[Any] = [destination_vertex] SCREAMING_SNAKE_CASE : Optional[Any] = [] return self def __repr__( self : Dict ): return pformat(self.adj_list )
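# A hedged usage sketch for the adjacency-list graph above. The class and its
# edge-insertion method are name-obfuscated in this dump (`SCREAMING_SNAKE_CASE`,
# `_A`); the names assumed below are GraphAdjacencyList and add_edge.
graph = GraphAdjacencyList[int]()     # directed by default
graph.add_edge(0, 1).add_edge(0, 2)   # the method returns self, so calls chain
graph.add_edge(1, 2)
print(graph)  # {0: [1, 2], 1: [2], 2: []}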
319
import argparse import os import torch from transformers.utils import WEIGHTS_NAME snake_case = ["""small""", """medium""", """large"""] snake_case = """lm_head.decoder.weight""" snake_case = """lm_head.weight""" def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = torch.load(lowercase ) SCREAMING_SNAKE_CASE : Any = d.pop(lowercase ) os.makedirs(lowercase , exist_ok=lowercase ) torch.save(lowercase , os.path.join(lowercase , lowercase ) ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() parser.add_argument("""--dialogpt_path""", default=""".""", type=str) snake_case = parser.parse_args() for MODEL in DIALOGPT_MODELS: snake_case = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""") snake_case = F"""./DialoGPT-{MODEL}""" convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
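# The core transformation performed above, as a standalone sketch on a toy state
# dict: DialoGPT's fine-tuned pickles key the LM head as `lm_head.decoder.weight`,
# while transformers expects `lm_head.weight`.
import torch

state_dict = {"lm_head.decoder.weight": torch.zeros(2, 2), "transformer.wte.weight": torch.ones(2, 2)}
state_dict["lm_head.weight"] = state_dict.pop("lm_head.decoder.weight")
print(sorted(state_dict))  # ['lm_head.weight', 'transformer.wte.weight']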
319
1
import os
import tempfile
import unittest
import uuid
from pathlib import Path

from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available


if is_torch_available():
    import torch

if is_soundfile_availble():
    import soundfile as sf

if is_vision_available():
    from PIL import Image


def get_new_path(suffix="") -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)


@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)
        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)


@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))


class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the two input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests or_gate against the full truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)


@dataclass
class IFPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the initial size of each disjoint set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """
        Merge the sets containing src and dst; returns False if they are
        already in the same set. Union by rank, tracking the largest set size.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """
        Find the representative of a set, compressing the path along the way.
        """
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
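# Minimal usage sketch for the DisjointSet above (an illustrative addition, not part
# of the original file): three singleton sets are merged and the largest size tracked.
if __name__ == "__main__":
    ds = DisjointSet([1, 1, 1])  # three sets, each initially of size 1
    ds.merge(0, 1)  # {0} and {1} join into a set of size 2
    ds.merge(1, 2)  # element 2 joins, giving a set of size 3
    print(ds.get_parent(0) == ds.get_parent(2))  # True: same representative
    print(ds.max_set)  # 3: size of the largest set seen so far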
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch


if is_vision_available():
    from transformers import TvltImageProcessor

if is_speech_available():
    from transformers import TvltFeatureExtractor

from transformers import TvltProcessor


@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        image = np.ones([3, 224, 224])

        image_dict = image_processor(image, return_tensors="np")
        input_processor = processor(images=image, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        image = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=image)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    """
    Configuration class for TimmBackbone, which wraps a timm model so it can be
    used as a backbone by other vision models.
    """

    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
import numpy as np


def runge_kutta(f, y0, x0, h, x_end):
    """
    Calculate the numeric solution of the ODE y' = f(x, y) at each step using
    the classical fourth-order Runge-Kutta (RK4) method.
    """
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Four slope evaluations per step: start, two midpoints, and endpoint.
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
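# Quick numerical sanity check for the RK4 routine above (an illustrative addition,
# not part of the original file): for dy/dx = y with y(0) = 1 the exact solution is
# e^x, so integrating to x = 1 with a small step should land very close to e.
if __name__ == "__main__":
    approx = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1]
    print(f"RK4 estimate of e: {approx:.6f} (exact: 2.718282)")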
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Returns the sum of the proper divisors of n."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """Returns the sum of all amicable numbers under n."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
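# Worked example for the divisor-sum helper above (an illustrative addition, not part
# of the original file): 220 and 284 form the classic amicable pair, since the proper
# divisors of 220 sum to 284 and the proper divisors of 284 sum back to 220:
#
#     sum_of_divisors(220)  # -> 284
#     sum_of_divisors(284)  # -> 220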
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Returns a list of the primes below limit, via a sieve over odd numbers."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1000000) -> int:
    """
    Returns the prime below the ceiling that can be written as the sum of the
    longest run of consecutive primes.
    """
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
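# Sanity check for solution() above (an illustrative addition, not part of the
# original file): per the Project Euler problem 50 statement, the longest sum of
# consecutive primes below one thousand that adds to a prime has 21 terms and
# equals 953.
if __name__ == "__main__":
    assert solution(1000) == 953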
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: snake_case = None snake_case = logging.get_logger(__name__) snake_case = """▁""" snake_case = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} snake_case = { """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}, """tokenizer_file""": { """google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json""" }, } snake_case = { """google/pegasus-xsum""": 512, } class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES UpperCamelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int = PegasusTokenizer UpperCamelCase_ : str = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Optional[int]="<pad>" , UpperCAmelCase_ : int="</s>" , UpperCAmelCase_ : str="<unk>" , UpperCAmelCase_ : str="<mask_2>" , UpperCAmelCase_ : Optional[int]="<mask_1>" , UpperCAmelCase_ : int=None , UpperCAmelCase_ : str=103 , **UpperCAmelCase_ : Optional[int] , ): SCREAMING_SNAKE_CASE : Optional[Any] = offset if additional_special_tokens is not None: if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): raise TypeError( f'''additional_special_tokens should be of type {type(UpperCAmelCase_ )}, but is''' f''' {type(UpperCAmelCase_ )}''' ) SCREAMING_SNAKE_CASE : Optional[Any] = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'''<unk_{i}>''' for i in range(len(UpperCAmelCase_ ) , self.offset - 1 ) ] if len(set(UpperCAmelCase_ ) ) != len(UpperCAmelCase_ ): raise ValueError( "Please make sure that the provided additional_special_tokens do not contain an incorrectly" f''' shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.''' ) SCREAMING_SNAKE_CASE : int = additional_special_tokens_extended else: SCREAMING_SNAKE_CASE : Tuple = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )] super().__init__( UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , mask_token_sent=UpperCAmelCase_ , offset=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : str = vocab_file SCREAMING_SNAKE_CASE : str = False if not self.vocab_file else True def _A ( self : Optional[Any] , UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : Optional[int] = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( "There should be 3 special tokens: mask_token, pad_token, and eos_token +" f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' ) return [1 if x in all_special_ids else 0 for x in seq] def _A ( self : int , UpperCAmelCase_ : List , UpperCAmelCase_ : Optional[List] = None , UpperCAmelCase_ : bool = False ): if already_has_special_tokens: return self._special_token_mask(UpperCAmelCase_ ) elif token_ids_a is None: return self._special_token_mask(UpperCAmelCase_ ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def _A ( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any=None ): if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _A ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(UpperCAmelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE : List[str] = os.path.join( UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ): copyfile(self.vocab_file , UpperCAmelCase_ ) return (out_vocab_file,)
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]


if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## snake_case = 16 snake_case = 32 def lowerCamelCase__ ( lowercase , lowercase = 16 ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained("bert-base-cased" ) SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("glue" , "mrpc" ) def tokenize_function(lowercase ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowercase , max_length=lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): SCREAMING_SNAKE_CASE : List[Any] = datasets.map( lowercase , batched=lowercase , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library SCREAMING_SNAKE_CASE : Tuple = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. SCREAMING_SNAKE_CASE : Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": SCREAMING_SNAKE_CASE : str = 16 elif accelerator.mixed_precision != "no": SCREAMING_SNAKE_CASE : Optional[Any] = 8 else: SCREAMING_SNAKE_CASE : Union[str, Any] = None return tokenizer.pad( lowercase , padding="longest" , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors="pt" , ) # Instantiate dataloaders. 
SCREAMING_SNAKE_CASE : Optional[int] = DataLoader( tokenized_datasets["train"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase ) SCREAMING_SNAKE_CASE : Dict = DataLoader( tokenized_datasets["validation"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders snake_case = mocked_dataloaders # noqa: F811 def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowercase ) == "1": SCREAMING_SNAKE_CASE : int = 2 # New Code # SCREAMING_SNAKE_CASE : Union[str, Any] = int(args.gradient_accumulation_steps ) # Initialize accelerator SCREAMING_SNAKE_CASE : Tuple = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs SCREAMING_SNAKE_CASE : Any = config["lr"] SCREAMING_SNAKE_CASE : Optional[Any] = int(config["num_epochs"] ) SCREAMING_SNAKE_CASE : List[Any] = int(config["seed"] ) SCREAMING_SNAKE_CASE : Union[str, Any] = int(config["batch_size"] ) SCREAMING_SNAKE_CASE : Optional[Any] = evaluate.load("glue" , "mrpc" ) set_seed(lowercase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = get_dataloaders(lowercase , lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) SCREAMING_SNAKE_CASE : List[Any] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowercase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). SCREAMING_SNAKE_CASE : Any = model.to(accelerator.device ) # Instantiate optimizer SCREAMING_SNAKE_CASE : Any = AdamW(params=model.parameters() , lr=lowercase ) # Instantiate scheduler SCREAMING_SNAKE_CASE : Union[str, Any] = get_linear_schedule_with_warmup( optimizer=lowercase , num_warmup_steps=100 , num_training_steps=(len(lowercase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = accelerator.prepare( lowercase , lowercase , lowercase , lowercase , lowercase ) # Now we train the model for epoch in range(lowercase ): model.train() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(lowercase ): SCREAMING_SNAKE_CASE : Any = model(**lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = output.loss accelerator.backward(lowercase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE : List[Any] = model(**lowercase ) SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits.argmax(dim=-1 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=lowercase , references=lowercase , ) SCREAMING_SNAKE_CASE : Tuple = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , lowercase ) def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=lowercase , default=lowercase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) # New Code # parser.add_argument( "--gradient_accumulation_steps" , type=lowercase , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) SCREAMING_SNAKE_CASE : List[str] = parser.parse_args() SCREAMING_SNAKE_CASE : Dict = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(lowercase , lowercase ) if __name__ == "__main__": main()
import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class SCREAMING_SNAKE_CASE ( lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Any = None UpperCamelCase_ : int = BloomTokenizerFast UpperCamelCase_ : Optional[int] = BloomTokenizerFast UpperCamelCase_ : Any = True UpperCamelCase_ : int = False UpperCamelCase_ : List[str] = '''tokenizer_file''' UpperCamelCase_ : str = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''} def _A ( self : Tuple ): super().setUp() SCREAMING_SNAKE_CASE : List[str] = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" ) tokenizer.save_pretrained(self.tmpdirname ) def _A ( self : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ): kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_ ) def _A ( self : List[str] ): SCREAMING_SNAKE_CASE : Dict = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE : Tuple = ["The quick brown fox</s>", "jumps over the lazy dog</s>"] SCREAMING_SNAKE_CASE : int = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]] SCREAMING_SNAKE_CASE : List[Any] = tokenizer.batch_encode_plus(UpperCAmelCase_ )["input_ids"] self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.batch_decode(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def _A ( self : Union[str, Any] , UpperCAmelCase_ : List[Any]=6 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input SCREAMING_SNAKE_CASE : Any = "This is a simple input" SCREAMING_SNAKE_CASE : List[Any] = ["This is a simple input 1", "This is a simple input 2"] SCREAMING_SNAKE_CASE : Optional[int] = ("This is a simple input", "This is a pair") SCREAMING_SNAKE_CASE : Any = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests try: tokenizer_r.encode(UpperCAmelCase_ , max_length=UpperCAmelCase_ ) tokenizer_r.encode_plus(UpperCAmelCase_ , max_length=UpperCAmelCase_ ) tokenizer_r.batch_encode_plus(UpperCAmelCase_ , max_length=UpperCAmelCase_ ) tokenizer_r.encode(UpperCAmelCase_ , max_length=UpperCAmelCase_ ) tokenizer_r.batch_encode_plus(UpperCAmelCase_ , max_length=UpperCAmelCase_ ) except ValueError: self.fail("Bloom Tokenizer should be able to deal with padding" ) SCREAMING_SNAKE_CASE : Optional[Any] = None # Hotfixing padding = None self.assertRaises(UpperCAmelCase_ , tokenizer_r.encode , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding="max_length" ) # Simple input self.assertRaises(UpperCAmelCase_ , tokenizer_r.encode_plus , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding="max_length" ) # Simple input self.assertRaises( UpperCAmelCase_ , tokenizer_r.batch_encode_plus , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding="max_length" , ) # Pair input self.assertRaises(UpperCAmelCase_ , tokenizer_r.encode , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding="max_length" ) # Pair input 
self.assertRaises(UpperCAmelCase_ , tokenizer_r.encode_plus , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding="max_length" ) # Pair input self.assertRaises( UpperCAmelCase_ , tokenizer_r.batch_encode_plus , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding="max_length" , ) def _A ( self : List[Any] ): SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE : str = load_dataset("xnli" , "all_languages" , split="test" , streaming=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = next(iter(UpperCAmelCase_ ) )["premise"] # pick up one data SCREAMING_SNAKE_CASE : Optional[int] = list(sample_data.values() ) SCREAMING_SNAKE_CASE : List[Any] = list(map(tokenizer.encode , UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = [tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) for x in output_tokens] self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def _A ( self : List[Any] ): # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have # any sequence length constraints. This test of the parent class will fail since it relies on the # maximum sequence length of the positoonal embeddings. self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        # Cheapest way to cover all travel days from `index` to the end of the year.
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
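# Worked example for mincost_tickets above (an illustrative addition, not part of the
# original file), using the classic LeetCode 983 instance: travel days
# [1, 4, 6, 7, 8, 20] with pass costs [2, 7, 15] for 1-day, 7-day and 30-day passes.
# A 7-day pass bought on day 1 covers days 1 through 7, and single-day passes cover
# days 8 and 20, so the optimal total is 7 + 2 + 2 = 11.
if __name__ == "__main__":
    assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11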
from __future__ import annotations

import numpy as np


def relu(vector: list[float]) -> np.ndarray:
    """
    Rectified linear unit (ReLU) activation: maps every negative input to 0
    and keeps non-negative inputs unchanged.
    """
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
def perfect_cube(n: int) -> bool:
    """
    Checks whether a non-negative integer n is a perfect cube. The candidate
    cube root is rounded to the nearest integer before checking, because a
    direct comparison like `(n ** (1 / 3)) ** 3 == n` fails for many cubes
    due to floating-point rounding error.
    """
    val = round(n ** (1 / 3))
    return val * val * val == n


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))  # False
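# Quick check of the rounded-root fix above (an illustrative addition, not part of
# the original file): the naive floating-point test reports False for 27 on most
# platforms, while the rounded version gives the expected answers.
if __name__ == "__main__":
    assert perfect_cube(27) is True
    assert perfect_cube(4) is False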
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import evaluate import numpy as np import torch from datasets import load_dataset from PIL import Image from torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) import transformers from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForImageClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version snake_case = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""") snake_case = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys()) snake_case = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def lowerCamelCase__ ( lowercase ): """simple docstring""" with open(lowercase , "rb" ) as f: SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open(lowercase ) return im.convert("RGB" ) @dataclass class SCREAMING_SNAKE_CASE : '''simple docstring''' UpperCamelCase_ : Optional[str] = field( default=lowerCAmelCase , metadata={ '''help''': '''Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).''' } , ) UpperCamelCase_ : Optional[str] = field( default=lowerCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) UpperCamelCase_ : Optional[str] = field(default=lowerCAmelCase , metadata={'''help''': '''A folder containing the training data.'''} ) UpperCamelCase_ : Optional[str] = field(default=lowerCAmelCase , metadata={'''help''': '''A folder containing the validation data.'''} ) UpperCamelCase_ : Optional[float] = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) UpperCamelCase_ : Optional[int] = field( default=lowerCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCamelCase_ : Optional[int] = field( default=lowerCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def _A ( self : List[str] ): if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None): raise ValueError( "You must specify either a dataset name from the hub or a train and/or validation directory." 
) @dataclass class SCREAMING_SNAKE_CASE : '''simple docstring''' UpperCamelCase_ : str = field( default='''google/vit-base-patch16-224-in21k''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , ) UpperCamelCase_ : Optional[str] = field( default=lowerCAmelCase , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(lowerCAmelCase )} , ) UpperCamelCase_ : Optional[str] = field( default=lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCamelCase_ : Optional[str] = field( default=lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) UpperCamelCase_ : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCamelCase_ : str = field(default=lowerCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} ) UpperCamelCase_ : bool = field( default=lowerCAmelCase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) UpperCamelCase_ : bool = field( default=lowerCAmelCase , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , ) def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = torch.stack([example["pixel_values"] for example in examples] ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([example["labels"] for example in examples] ) return {"pixel_values": pixel_values, "labels": labels} def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_image_classification" , lowercase , lowercase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() SCREAMING_SNAKE_CASE : Union[str, Any] = training_args.get_process_log_level() logger.setLevel(lowercase ) transformers.utils.logging.set_verbosity(lowercase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. SCREAMING_SNAKE_CASE : List[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: SCREAMING_SNAKE_CASE : Any = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Initialize our dataset and prepare it for the 'image-classification' task. if data_args.dataset_name is not None: SCREAMING_SNAKE_CASE : Any = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , ) else: SCREAMING_SNAKE_CASE : Optional[Any] = {} if data_args.train_dir is not None: SCREAMING_SNAKE_CASE : Any = os.path.join(data_args.train_dir , "**" ) if data_args.validation_dir is not None: SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(data_args.validation_dir , "**" ) SCREAMING_SNAKE_CASE : str = load_dataset( "imagefolder" , data_files=lowercase , cache_dir=model_args.cache_dir , task="image-classification" , ) # If we don't have a validation split, split off a percentage of train as validation. SCREAMING_SNAKE_CASE : Optional[int] = None if "validation" in dataset.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , lowercase ) and data_args.train_val_split > 0.0: SCREAMING_SNAKE_CASE : int = dataset["train"].train_test_split(data_args.train_val_split ) SCREAMING_SNAKE_CASE : Union[str, Any] = split["train"] SCREAMING_SNAKE_CASE : Dict = split["test"] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. SCREAMING_SNAKE_CASE : Union[str, Any] = dataset["train"].features["labels"].names SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = {}, {} for i, label in enumerate(lowercase ): SCREAMING_SNAKE_CASE : Tuple = str(lowercase ) SCREAMING_SNAKE_CASE : Union[str, Any] = label # Load the accuracy metric from the datasets package SCREAMING_SNAKE_CASE : List[Any] = evaluate.load("accuracy" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. 
def compute_metrics(lowercase ): return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids ) SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(lowercase ) , labelaid=lowercase , idalabel=lowercase , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) SCREAMING_SNAKE_CASE : Tuple = AutoModelForImageClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) SCREAMING_SNAKE_CASE : Optional[Any] = AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Define torchvision transforms to be applied to each image. if "shortest_edge" in image_processor.size: SCREAMING_SNAKE_CASE : Optional[Any] = image_processor.size["shortest_edge"] else: SCREAMING_SNAKE_CASE : Optional[int] = (image_processor.size["height"], image_processor.size["width"]) SCREAMING_SNAKE_CASE : Tuple = Normalize(mean=image_processor.image_mean , std=image_processor.image_std ) SCREAMING_SNAKE_CASE : str = Compose( [ RandomResizedCrop(lowercase ), RandomHorizontalFlip(), ToTensor(), normalize, ] ) SCREAMING_SNAKE_CASE : str = Compose( [ Resize(lowercase ), CenterCrop(lowercase ), ToTensor(), normalize, ] ) def train_transforms(lowercase ): SCREAMING_SNAKE_CASE : Tuple = [ _train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"] ] return example_batch def val_transforms(lowercase ): SCREAMING_SNAKE_CASE : Dict = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]] return example_batch if training_args.do_train: if "train" not in dataset: raise ValueError("--do_train requires a train dataset" ) if data_args.max_train_samples is not None: SCREAMING_SNAKE_CASE : Any = ( dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms dataset["train"].set_transform(lowercase ) if training_args.do_eval: if "validation" not in dataset: raise ValueError("--do_eval requires a validation dataset" ) if data_args.max_eval_samples is not None: SCREAMING_SNAKE_CASE : Union[str, Any] = ( dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms dataset["validation"].set_transform(lowercase ) # Initalize our trainer SCREAMING_SNAKE_CASE : Union[str, Any] = Trainer( model=lowercase , args=lowercase , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=lowercase , tokenizer=lowercase , data_collator=lowercase , ) # Training if training_args.do_train: SCREAMING_SNAKE_CASE : Optional[Any] = None if training_args.resume_from_checkpoint is not None: SCREAMING_SNAKE_CASE : Optional[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: SCREAMING_SNAKE_CASE : List[Any] = last_checkpoint SCREAMING_SNAKE_CASE : Dict = 
trainer.train(resume_from_checkpoint=lowercase ) trainer.save_model() trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: SCREAMING_SNAKE_CASE : Optional[int] = trainer.evaluate() trainer.log_metrics("eval" , lowercase ) trainer.save_metrics("eval" , lowercase ) # Write model card and (optionally) push to hub SCREAMING_SNAKE_CASE : List[str] = { "finetuned_from": model_args.model_name_or_path, "tasks": "image-classification", "dataset": data_args.dataset_name, "tags": ["image-classification", "vision"], } if training_args.push_to_hub: trainer.push_to_hub(**lowercase ) else: trainer.create_model_card(**lowercase ) if __name__ == "__main__": main()
import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() snake_case = logging.get_logger(__name__) snake_case = [ ("""bert.bert""", """visual_bert"""), ("""bert.cls""", """cls"""), ("""bert.classifier""", """cls"""), ("""token_type_embeddings_visual""", """visual_token_type_embeddings"""), ("""position_embeddings_visual""", """visual_position_embeddings"""), ("""projection""", """visual_projection"""), ] snake_case = [ """nlvr2_coco_pre_trained.th""", """nlvr2_fine_tuned.th""", """nlvr2_pre_trained.th""", """vcr_coco_pre_train.th""", """vcr_fine_tune.th""", """vcr_pre_train.th""", """vqa_coco_pre_trained.th""", """vqa_fine_tuned.th""", """vqa_pre_trained.th""", ] def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : int = torch.load(lowercase , map_location="cpu" ) return sd def lowerCamelCase__ ( lowercase , lowercase , lowercase=rename_keys_prefix ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = OrderedDict() SCREAMING_SNAKE_CASE : Union[str, Any] = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue SCREAMING_SNAKE_CASE : Optional[Any] = key for name_pair in rename_keys_prefix: SCREAMING_SNAKE_CASE : Tuple = new_key.replace(name_pair[0] , name_pair[1] ) SCREAMING_SNAKE_CASE : Union[str, Any] = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately SCREAMING_SNAKE_CASE : Union[str, Any] = new_d["cls.predictions.bias"] return new_d @torch.no_grad() def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" assert ( checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.''' # Get Config if "pre" in checkpoint_path: SCREAMING_SNAKE_CASE : str = "pretraining" if "vcr" in checkpoint_path: SCREAMING_SNAKE_CASE : str = {"visual_embedding_dim": 512} elif "vqa_advanced" in checkpoint_path: SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 2048} elif "vqa" in checkpoint_path: SCREAMING_SNAKE_CASE : Optional[int] = {"visual_embedding_dim": 2048} elif "nlvr" in checkpoint_path: SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 1024} else: raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' ) else: if "vcr" in checkpoint_path: SCREAMING_SNAKE_CASE : Optional[Any] = {"visual_embedding_dim": 512} SCREAMING_SNAKE_CASE : Union[str, Any] = "multichoice" elif "vqa_advanced" in checkpoint_path: SCREAMING_SNAKE_CASE : int = {"visual_embedding_dim": 2048} SCREAMING_SNAKE_CASE : Any = "vqa_advanced" elif "vqa" in checkpoint_path: SCREAMING_SNAKE_CASE : Any = {"visual_embedding_dim": 2048, "num_labels": 3129} SCREAMING_SNAKE_CASE : Tuple = "vqa" elif "nlvr" in checkpoint_path: SCREAMING_SNAKE_CASE : int = { "visual_embedding_dim": 1024, "num_labels": 2, } SCREAMING_SNAKE_CASE : Union[str, Any] = "nlvr" SCREAMING_SNAKE_CASE : List[Any] = VisualBertConfig(**lowercase ) # Load State Dict SCREAMING_SNAKE_CASE : Union[str, Any] = load_state_dict(lowercase ) SCREAMING_SNAKE_CASE : Union[str, Any] = get_new_dict(lowercase , lowercase ) if model_type == "pretraining": 
SCREAMING_SNAKE_CASE : Union[str, Any] = VisualBertForPreTraining(lowercase ) elif model_type == "vqa": SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForQuestionAnswering(lowercase ) elif model_type == "nlvr": SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForVisualReasoning(lowercase ) elif model_type == "multichoice": SCREAMING_SNAKE_CASE : List[Any] = VisualBertForMultipleChoice(lowercase ) model.load_state_dict(lowercase ) # Save Checkpoints Path(lowercase ).mkdir(exist_ok=lowercase ) model.save_pretrained(lowercase ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""") parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""") snake_case = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
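A hedged usage sketch (the script and directory names are illustrative, not from the source): converting a fine-tuned NLVR2 checkpoint yields a folder that reloads with the task head the branch above selects for it.

# e.g. python convert_visual_bert_checkpoint.py nlvr2_fine_tuned.th ./visual_bert_nlvr2
from transformers import VisualBertForVisualReasoning

model = VisualBertForVisualReasoning.from_pretrained("./visual_bert_nlvr2")  # folder written by save_pretrained above
model.eval()  # ready for inference with the converted weights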
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) snake_case = { """configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""], """tokenization_roberta""": ["""RobertaTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = ["""RobertaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """RobertaForCausalLM""", """RobertaForMaskedLM""", """RobertaForMultipleChoice""", """RobertaForQuestionAnswering""", """RobertaForSequenceClassification""", """RobertaForTokenClassification""", """RobertaModel""", """RobertaPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFRobertaForCausalLM""", """TFRobertaForMaskedLM""", """TFRobertaForMultipleChoice""", """TFRobertaForQuestionAnswering""", """TFRobertaForSequenceClassification""", """TFRobertaForTokenClassification""", """TFRobertaMainLayer""", """TFRobertaModel""", """TFRobertaPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """FlaxRobertaForCausalLM""", """FlaxRobertaForMaskedLM""", """FlaxRobertaForMultipleChoice""", """FlaxRobertaForQuestionAnswering""", """FlaxRobertaForSequenceClassification""", """FlaxRobertaForTokenClassification""", """FlaxRobertaModel""", """FlaxRobertaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, 
module_spec=__spec__)
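A short sketch of what the `_LazyModule` indirection buys (config values are arbitrary): importing `transformers` only registers the names in the import structure above; each submodule is materialized on first attribute access.

import transformers  # cheap: names are registered, no heavy submodule runs yet

config = transformers.RobertaConfig(  # first access imports configuration_roberta only
    num_hidden_layers=2, hidden_size=64, num_attention_heads=2, intermediate_size=128
)
model = transformers.RobertaModel(config)  # this access pulls in modeling_roberta (and torch)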
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Dict = '''ClapFeatureExtractor''' UpperCamelCase_ : Any = ('''RobertaTokenizer''', '''RobertaTokenizerFast''') def __init__( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple ): super().__init__(UpperCAmelCase_ , UpperCAmelCase_ ) def __call__( self : Optional[Any] , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[str]=None , **UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : Tuple = kwargs.pop("sampling_rate" , UpperCAmelCase_ ) if text is None and audios is None: raise ValueError("You have to specify either text or audios. Both cannot be none." ) if text is not None: SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ ) if audios is not None: SCREAMING_SNAKE_CASE : Optional[int] = self.feature_extractor( UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ ) if text is not None and audios is not None: SCREAMING_SNAKE_CASE : Optional[Any] = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase_ ) , tensor_type=UpperCAmelCase_ ) def _A ( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : str ): return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) def _A ( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Any ): return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) @property def _A ( self : str ): SCREAMING_SNAKE_CASE : Any = self.tokenizer.model_input_names SCREAMING_SNAKE_CASE : List[Any] = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
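A hedged usage sketch of the processor above (the checkpoint id and the 48 kHz rate are assumptions based on public CLAP checkpoints): text goes through the Roberta tokenizer, audio through the Clap feature extractor, and `__call__` merges the two outputs.

import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")  # assumed checkpoint id
audio = np.zeros(48_000, dtype=np.float32)  # one second of silence at the assumed 48 kHz rate
inputs = processor(text=["a dog barking"], audios=audio, sampling_rate=48_000, return_tensors="pt")
# `inputs` now carries the tokenizer fields (input_ids, attention_mask) plus the
# feature extractor's input_features, merged exactly as in __call__ above.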
def is_subset_sum(arr: list[int], required_sum: int) -> bool:
    """Bottom-up DP: subset[i][j] is True iff some subset of arr[:i] sums to j."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero can be formed by not taking any element,
    # hence True
    for i in range(arr_len + 1):
        subset[i][0] = True
    # a non-zero sum cannot be formed from the empty set, hence False
    for j in range(1, required_sum + 1):
        subset[0][j] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            else:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
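A worked example of the DP above (using the restored name `is_subset_sum`): from [3, 34, 4, 12, 5, 2] the sum 9 is reachable as 4 + 5, while 30 is not, so the table's last cell comes out True and False respectively.

assert is_subset_sum([3, 34, 4, 12, 5, 2], 9) is True    # 4 + 5 == 9
assert is_subset_sum([3, 34, 4, 12, 5, 2], 30) is False  # no subset reaches 30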
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" assert isinstance(lowercase , lowercase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = tmp_path / "cache" SCREAMING_SNAKE_CASE : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = tmp_path / "cache" SCREAMING_SNAKE_CASE : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : Any = features.copy() if features else default_expected_features SCREAMING_SNAKE_CASE : Optional[int] = ( Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache" SCREAMING_SNAKE_CASE : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" if issubclass(lowercase , lowercase ): SCREAMING_SNAKE_CASE : Optional[Any] = parquet_path elif issubclass(lowercase , lowercase ): SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path] SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache" SCREAMING_SNAKE_CASE : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) def lowerCamelCase__ ( lowercase , lowercase , lowercase=("train",) ): """simple docstring""" assert isinstance(lowercase , lowercase ) for split in splits: SCREAMING_SNAKE_CASE : Optional[int] = dataset_dict[split] 
assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : str = tmp_path / "cache" SCREAMING_SNAKE_CASE : Dict = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): SCREAMING_SNAKE_CASE : str = ParquetDatasetReader( {"train": parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache" SCREAMING_SNAKE_CASE : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : Dict = features.copy() if features else default_expected_features SCREAMING_SNAKE_CASE : str = ( Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader({"train": parquet_path} , features=lowercase , cache_dir=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" if split: SCREAMING_SNAKE_CASE : Any = {split: parquet_path} else: SCREAMING_SNAKE_CASE : Tuple = "train" SCREAMING_SNAKE_CASE : int = {"train": parquet_path, "test": parquet_path} SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache" SCREAMING_SNAKE_CASE : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : int = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" ) assert writer.write() > 0 SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / "foo.parquet" ) SCREAMING_SNAKE_CASE : List[Any] = pf.read() assert dataset.data.table == output_table def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : int = str(shared_datadir / "test_image_rgb.jpg" ) SCREAMING_SNAKE_CASE : Union[str, Any] = {"image": [image_path]} SCREAMING_SNAKE_CASE : Union[str, Any] = Features({"image": Image()} ) SCREAMING_SNAKE_CASE : int = Dataset.from_dict(lowercase , features=lowercase ) SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" ) assert writer.write() > 0 SCREAMING_SNAKE_CASE : str = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) ) assert dataset.features == reloaded_dataset.features SCREAMING_SNAKE_CASE : Any = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=lowercase ).read() assert 
dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( "feature, expected" , [ (Features({"foo": Value("int32" )} ), None), (Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" assert get_writer_batch_size(lowercase ) == expected
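A minimal round-trip sketch of the reader/writer pair these tests exercise (the file name is illustrative):

from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [0, 1, 2, 3], "col_3": [0.0, 1.0, 2.0, 3.0]})
assert ParquetDatasetWriter(ds, "tmp.parquet").write() > 0  # the tests above only assert this is > 0

reloaded = ParquetDatasetReader("tmp.parquet").read()
assert reloaded.column_names == ["col_1", "col_2", "col_3"]
assert reloaded.num_rows == 4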
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: snake_case = None snake_case = logging.get_logger(__name__) snake_case = """▁""" snake_case = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} snake_case = { """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}, """tokenizer_file""": { """google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json""" }, } snake_case = { """google/pegasus-xsum""": 512, } class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES UpperCamelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int = PegasusTokenizer UpperCamelCase_ : str = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Optional[int]="<pad>" , UpperCAmelCase_ : int="</s>" , UpperCAmelCase_ : str="<unk>" , UpperCAmelCase_ : str="<mask_2>" , UpperCAmelCase_ : Optional[int]="<mask_1>" , UpperCAmelCase_ : int=None , UpperCAmelCase_ : str=103 , **UpperCAmelCase_ : Optional[int] , ): SCREAMING_SNAKE_CASE : Optional[Any] = offset if additional_special_tokens is not None: if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): raise TypeError( f'''additional_special_tokens should be of type {type(UpperCAmelCase_ )}, but is''' f''' {type(UpperCAmelCase_ )}''' ) SCREAMING_SNAKE_CASE : Optional[Any] = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'''<unk_{i}>''' for i in range(len(UpperCAmelCase_ ) , self.offset - 1 ) ] if len(set(UpperCAmelCase_ ) ) != len(UpperCAmelCase_ ): raise ValueError( "Please make sure that the provided additional_special_tokens do not contain an incorrectly" f''' shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.''' ) SCREAMING_SNAKE_CASE : int = additional_special_tokens_extended else: SCREAMING_SNAKE_CASE : Tuple = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )] super().__init__( UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , mask_token_sent=UpperCAmelCase_ , offset=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : str = vocab_file SCREAMING_SNAKE_CASE : str = False if not self.vocab_file else True def _A ( self : Optional[Any] , UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : Optional[int] = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( "There should be 3 special tokens: mask_token, pad_token, and eos_token +" f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' ) return [1 if x in all_special_ids else 0 for x in seq] def _A ( self : int , UpperCAmelCase_ : List , UpperCAmelCase_ : Optional[List] = None , UpperCAmelCase_ : bool = False ): if already_has_special_tokens: return self._special_token_mask(UpperCAmelCase_ ) elif token_ids_a is None: return self._special_token_mask(UpperCAmelCase_ ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def _A ( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any=None ): if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _A ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(UpperCAmelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE : List[str] = os.path.join( UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ): copyfile(self.vocab_file , UpperCAmelCase_ ) return (out_vocab_file,)
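A hedged usage sketch (the checkpoint id comes from the map above; the public method name `get_special_tokens_mask` is assumed for the obfuscated mask helper): the tokenizer appends `</s>` and flags it as special.

from transformers import PegasusTokenizerFast

tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
ids = tok("PEGASUS was pretrained with gap-sentence generation.").input_ids
assert ids[-1] == tok.eos_token_id  # build_inputs_with_special_tokens appends </s>
mask = tok.get_special_tokens_mask(ids, already_has_special_tokens=True)
assert mask[-1] == 1  # the trailing </s> is marked special by the mask helper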
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations from typing import Any class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : float = 0 ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = row, column SCREAMING_SNAKE_CASE : Optional[int] = [[default_value for c in range(UpperCAmelCase_ )] for r in range(UpperCAmelCase_ )] def __str__( self : int ): SCREAMING_SNAKE_CASE : List[Any] = f'''Matrix consist of {self.row} rows and {self.column} columns\n''' # Make string identifier SCREAMING_SNAKE_CASE : int = 0 for row_vector in self.array: for obj in row_vector: SCREAMING_SNAKE_CASE : str = max(UpperCAmelCase_ , len(str(UpperCAmelCase_ ) ) ) SCREAMING_SNAKE_CASE : Any = f'''%{max_element_length}s''' # Make string and return def single_line(UpperCAmelCase_ : list[float] ) -> str: nonlocal string_format_identifier SCREAMING_SNAKE_CASE : Optional[Any] = "[" line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(UpperCAmelCase_ ) for row_vector in self.array ) return s def __repr__( self : str ): return str(self ) def _A ( self : Dict , UpperCAmelCase_ : tuple[int, int] ): if not (isinstance(UpperCAmelCase_ , (list, tuple) ) and len(UpperCAmelCase_ ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self : List[str] , UpperCAmelCase_ : tuple[int, int] ): assert self.validate_indicies(UpperCAmelCase_ ) return self.array[loc[0]][loc[1]] def __setitem__( self : Tuple , UpperCAmelCase_ : tuple[int, int] , UpperCAmelCase_ : float ): assert self.validate_indicies(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = value def __add__( self : Dict , UpperCAmelCase_ : Matrix ): assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) assert self.row == another.row and self.column == another.column # Add SCREAMING_SNAKE_CASE : Union[str, Any] = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): SCREAMING_SNAKE_CASE : Optional[int] = self[r, c] + another[r, c] return result def __neg__( self : Dict ): SCREAMING_SNAKE_CASE : str = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): SCREAMING_SNAKE_CASE : Tuple = -self[r, c] return result def __sub__( self : Dict , UpperCAmelCase_ : Matrix ): return self + (-another) def __mul__( self : List[str] , UpperCAmelCase_ : int | float | Matrix ): if isinstance(UpperCAmelCase_ , (int, float) ): # Scalar multiplication SCREAMING_SNAKE_CASE : str = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): SCREAMING_SNAKE_CASE : int = self[r, c] * another return result elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): # Matrix multiplication assert self.column == another.row SCREAMING_SNAKE_CASE : List[Any] = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: SCREAMING_SNAKE_CASE : Union[str, Any] = f'''Unsupported type given for another ({type(UpperCAmelCase_ )})''' raise TypeError(UpperCAmelCase_ ) def _A ( self : Any ): SCREAMING_SNAKE_CASE : List[str] = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): SCREAMING_SNAKE_CASE : str = self[r, c] return result def _A ( self : Any , UpperCAmelCase_ : Matrix , UpperCAmelCase_ : Matrix ): assert 
isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate SCREAMING_SNAKE_CASE : Union[str, Any] = v.transpose() SCREAMING_SNAKE_CASE : Tuple = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = Matrix(3 , 3 , 0 ) for i in range(3 ): SCREAMING_SNAKE_CASE : int = 1 print(F'''a^(-1) is {ainv}''' ) # u, v SCREAMING_SNAKE_CASE : str = Matrix(3 , 1 , 0 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = 1, 2, -3 SCREAMING_SNAKE_CASE : str = Matrix(3 , 1 , 0 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = 4, -2, 5 print(F'''u is {u}''' ) print(F'''v is {v}''' ) print(F'''uv^T is {u * v.transpose()}''' ) # Sherman Morrison print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(lowercase , lowercase )}''' ) def lowerCamelCase__ ( ): """simple docstring""" import doctest doctest.testmod() testa()
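The `sherman_morrison` method implements the identity (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u), which is only valid when the denominator is nonzero (hence the None return). A quick numpy cross-check of the same algebra, with illustrative values:

import numpy as np

a_inv = np.eye(3)  # A^(-1) for A = I, matching the identity matrix built in the test above
u = np.array([[1.0], [2.0], [-3.0]])
v = np.array([[4.0], [-2.0], [5.0]])

denom = 1.0 + (v.T @ a_inv @ u)[0, 0]  # -14.0 here, so the update is well defined
sherman_morrison = a_inv - (a_inv @ u) @ (v.T @ a_inv) / denom
assert np.allclose(sherman_morrison, np.linalg.inv(np.eye(3) + u @ v.T))  # matches direct inversion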
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: each pass bubbles the largest remaining element
    to the end, then recurses on the shorter prefix until no swap occurs."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
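Quick checks of the recursive bubble sort above (name `bubble_sort` as restored):

assert bubble_sort([0, 5, 2, 3, 2]) == [0, 2, 2, 3, 5]
assert bubble_sort([-2, -45, -5]) == [-45, -5, -2]
assert bubble_sort([]) == []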
import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _A ( self : Tuple ): # clean up the VRAM after each test super().tearDown() gc.collect() def _A ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE : List[str] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) SCREAMING_SNAKE_CASE : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) SCREAMING_SNAKE_CASE : Optional[int] = "xvjiarui/stable-diffusion-2-inpainting" SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCAmelCase_ , safety_checker=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = "Face of a yellow cat, high resolution, sitting on a park bench" SCREAMING_SNAKE_CASE : Optional[int] = jax.random.PRNGKey(0 ) SCREAMING_SNAKE_CASE : Any = 50 SCREAMING_SNAKE_CASE : List[Any] = jax.device_count() SCREAMING_SNAKE_CASE : int = num_samples * [prompt] SCREAMING_SNAKE_CASE : int = num_samples * [init_image] SCREAMING_SNAKE_CASE : Any = num_samples * [mask_image] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = pipeline.prepare_inputs(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # shard inputs and rng SCREAMING_SNAKE_CASE : List[Any] = replicate(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = jax.random.split(UpperCAmelCase_ , jax.device_count() ) SCREAMING_SNAKE_CASE : int = shard(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = shard(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = shard(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = pipeline( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , jit=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = output.images.reshape(UpperCAmelCase_ , 512 , 512 , 3 ) SCREAMING_SNAKE_CASE : Optional[Any] = images[0, 253:256, 253:256, -1] SCREAMING_SNAKE_CASE : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) SCREAMING_SNAKE_CASE : Tuple = jnp.array( [0.3_611_307, 0.37_649_736, 0.3_757_408, 0.38_213_953, 0.39_295_167, 0.3_841_631, 0.41_554_978, 0.4_137_475, 0.4_217_084] ) print(f'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
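The test above leans on the standard JAX data-parallel idiom: `replicate` copies parameters to every device and `shard` splits a batch along a new leading device axis before the `jit=True` call. A minimal sketch of just that mechanic, with illustrative shapes:

import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

n = jax.device_count()
params = {"w": jnp.ones((3,))}
batch = jnp.ones((n * 2, 8))  # the global batch must divide evenly across devices

replicated = replicate(params)  # every leaf gains a leading axis of size n
sharded = shard(batch)          # (n * 2, 8) -> (n, 2, 8)
assert replicated["w"].shape == (n, 3)
assert sharded.shape == (n, 2, 8)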
import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger snake_case = get_logger(__name__) snake_case = r""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs (`Dict[str, Any]`, *optional*): Additional logits processor specific kwargs. Return: `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. """ class SCREAMING_SNAKE_CASE : '''simple docstring''' @add_start_docstrings(UpperCAmelCase_ ) def __call__( self : str , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray ): raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class SCREAMING_SNAKE_CASE : '''simple docstring''' @add_start_docstrings(UpperCAmelCase_ ) def __call__( self : Optional[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray ): raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' @add_start_docstrings(UpperCAmelCase_ ) def __call__( self : Optional[int] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ): for processor in self: SCREAMING_SNAKE_CASE : Optional[int] = inspect.signature(processor.__call__ ).parameters if len(UpperCAmelCase_ ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( f'''Make sure that all the required parameters: {list(function_args.keys() )} for ''' f'''{processor.__class__} are passed to the logits processor.''' ) SCREAMING_SNAKE_CASE : int = processor(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ) else: SCREAMING_SNAKE_CASE : Dict = processor(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : int , UpperCAmelCase_ : float ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not (temperature > 0): raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' ) SCREAMING_SNAKE_CASE : Optional[int] = temperature def __call__( self : List[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Dict = scores / self.temperature return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : str , UpperCAmelCase_ : float , UpperCAmelCase_ : float = -float("Inf" ) , UpperCAmelCase_ : int = 1 ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or (top_p < 0 or top_p > 1.0): raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' ) if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or (min_tokens_to_keep < 1): raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' ) 
SCREAMING_SNAKE_CASE : Optional[int] = top_p SCREAMING_SNAKE_CASE : str = filter_value SCREAMING_SNAKE_CASE : List[str] = min_tokens_to_keep def __call__( self : Dict , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = lax.top_k(UpperCAmelCase_ , scores.shape[-1] ) SCREAMING_SNAKE_CASE : str = jnp.full_like(UpperCAmelCase_ , self.filter_value ) SCREAMING_SNAKE_CASE : Optional[int] = jax.nn.softmax(UpperCAmelCase_ , axis=-1 ).cumsum(axis=-1 ) SCREAMING_SNAKE_CASE : Tuple = cumulative_probs < self.top_p # include the token that is higher than top_p as well SCREAMING_SNAKE_CASE : Optional[int] = jnp.roll(UpperCAmelCase_ , 1 ) score_mask |= score_mask.at[:, 0].set(UpperCAmelCase_ ) # min tokens to keep SCREAMING_SNAKE_CASE : Union[str, Any] = score_mask.at[:, : self.min_tokens_to_keep].set(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = jnp.where(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = jax.lax.sort_key_val(UpperCAmelCase_ , UpperCAmelCase_ )[-1] return next_scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : float = -float("Inf" ) , UpperCAmelCase_ : int = 1 ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or top_k <= 0: raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' ) SCREAMING_SNAKE_CASE : List[str] = max(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = filter_value def __call__( self : Dict , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = scores.shape SCREAMING_SNAKE_CASE : List[str] = jnp.full(batch_size * vocab_size , self.filter_value ) SCREAMING_SNAKE_CASE : List[str] = min(self.top_k , scores.shape[-1] ) # Safety check SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = lax.top_k(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = jnp.broadcast_to((jnp.arange(UpperCAmelCase_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() SCREAMING_SNAKE_CASE : List[str] = topk_scores.flatten() SCREAMING_SNAKE_CASE : List[Any] = topk_indices.flatten() + shift SCREAMING_SNAKE_CASE : Dict = next_scores_flat.at[topk_indices_flat].set(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = next_scores_flat.reshape(UpperCAmelCase_ , UpperCAmelCase_ ) return next_scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : List[str] = bos_token_id def __call__( self : Tuple , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Dict = jnp.full(scores.shape , -float("inf" ) ) SCREAMING_SNAKE_CASE : Optional[int] = 1 - jnp.bool_(cur_len - 1 ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(UpperCAmelCase_ , new_scores.at[:, self.bos_token_id].set(0 ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Optional[Any] = max_length SCREAMING_SNAKE_CASE : Tuple = eos_token_id def __call__( self : List[str] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : List[str] = jnp.full(scores.shape , -float("inf" ) ) SCREAMING_SNAKE_CASE : str = 
1 - jnp.bool_(cur_len - self.max_length + 1 ) SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where(UpperCAmelCase_ , new_scores.at[:, self.eos_token_id].set(0 ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or min_length < 0: raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' ) if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or eos_token_id < 0: raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' ) SCREAMING_SNAKE_CASE : List[str] = min_length SCREAMING_SNAKE_CASE : Tuple = eos_token_id def __call__( self : Optional[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): # create boolean flag to decide if min length penalty should be applied SCREAMING_SNAKE_CASE : Optional[int] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) SCREAMING_SNAKE_CASE : Optional[int] = jnp.where(UpperCAmelCase_ , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Optional[Any] = list(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = begin_index def __call__( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Union[str, Any] = 1 - jnp.bool_(cur_len - self.begin_index ) SCREAMING_SNAKE_CASE : List[str] = jnp.where(UpperCAmelCase_ , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase_ : list ): SCREAMING_SNAKE_CASE : List[Any] = list(UpperCAmelCase_ ) def __call__( self : Any , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Tuple = scores.at[..., self.suppress_tokens].set(-float("inf" ) ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any ): SCREAMING_SNAKE_CASE : List[Any] = dict(UpperCAmelCase_ ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. 
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: SCREAMING_SNAKE_CASE : Any = force_token_array.at[index].set(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = jnp.intaa(UpperCAmelCase_ ) def __call__( self : Tuple , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): def _force_token(UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : List[str] = scores.shape[0] SCREAMING_SNAKE_CASE : Optional[int] = self.force_token_array[generation_idx] SCREAMING_SNAKE_CASE : Tuple = jnp.ones_like(UpperCAmelCase_ , dtype=scores.dtype ) * -float("inf" ) SCREAMING_SNAKE_CASE : Dict = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) SCREAMING_SNAKE_CASE : Optional[Any] = lax.dynamic_update_slice(UpperCAmelCase_ , UpperCAmelCase_ , (0, current_token) ) return new_scores SCREAMING_SNAKE_CASE : Any = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(UpperCAmelCase_ ) , lambda: scores , ) , ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : Union[str, Any] = generate_config.eos_token_id SCREAMING_SNAKE_CASE : Tuple = generate_config.no_timestamps_token_id SCREAMING_SNAKE_CASE : List[Any] = generate_config.no_timestamps_token_id + 1 SCREAMING_SNAKE_CASE : Dict = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(UpperCAmelCase_ , "max_initial_timestamp_index" ): SCREAMING_SNAKE_CASE : List[Any] = generate_config.max_initial_timestamp_index else: SCREAMING_SNAKE_CASE : List[str] = model_config.vocab_size if self.max_initial_timestamp_index is None: SCREAMING_SNAKE_CASE : List[str] = model_config.vocab_size def __call__( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ): # suppress <|notimestamps|> which is handled by without_timestamps SCREAMING_SNAKE_CASE : int = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) ) def handle_pairs(UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] ): SCREAMING_SNAKE_CASE : Tuple = jnp.where((cur_len - self.begin_index) >= 1 , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Tuple = jnp.where((cur_len - self.begin_index) < 2 , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , UpperCAmelCase_ , UpperCAmelCase_ , ) return jnp.where( UpperCAmelCase_ , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Optional[Any] = jax.vmap(UpperCAmelCase_ )(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(cur_len == self.begin_index , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : List[str] = self.timestamp_begin + self.max_initial_timestamp_index 
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where( UpperCAmelCase_ , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , UpperCAmelCase_ , ) # if sum of probability over timestamps is above any other token, sample timestamp SCREAMING_SNAKE_CASE : List[Any] = jax.nn.log_softmax(UpperCAmelCase_ , axis=-1 ) def handle_cumulative_probs(UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ): SCREAMING_SNAKE_CASE : Union[str, Any] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : List[str] = jax.vmap(UpperCAmelCase_ )(UpperCAmelCase_ , UpperCAmelCase_ ) return scores
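A hedged composition sketch using the public names these classes carry in `transformers.generation` (a temperature warper, a top-k warper, and the processor list defined at the top of the file):

import jax.numpy as jnp
from transformers.generation import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
)

processors = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=5)])
input_ids = jnp.zeros((2, 4), dtype=jnp.int32)
scores = jnp.tile(jnp.arange(32, dtype=jnp.float32), (2, 1))  # fake (batch, vocab) logits

warped = processors(input_ids, scores, cur_len=4)
assert warped.shape == scores.shape  # everything outside the top 5 is now the filter value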
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import platform
import sys


snake_case = """3"""

print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())

try:
    import torch

    print("""Torch version:""", torch.__version__)
    print("""Cuda available:""", torch.cuda.is_available())
    print("""Cuda version:""", torch.version.cuda)
    print("""CuDNN version:""", torch.backends.cudnn.version())
    print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
    print("""Torch version:""", None)

try:
    import transformers

    print("""transformers version:""", transformers.__version__)
except ImportError:
    print("""transformers version:""", None)
319
1
from __future__ import annotations


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    if len(lowercase ) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
    if any(i <= 0 for i in nums ):
        raise ValueError("All values must be greater than 0" )
    SCREAMING_SNAKE_CASE : Union[str, Any] = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
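# --- Aside: a standalone, de-obfuscated rendering of the polygon check above
# (assumed original name: check_polygon), with two worked cases. A polygon can close
# only if its longest side is strictly shorter than the sum of the remaining sides.
def check_polygon(nums: list[float]) -> bool:
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    sides = sorted(nums)
    return sides[-1] < sum(sides[:-1])


assert check_polygon([6, 10, 5]) is True  # 6 + 5 > 10, so the sides can close
assert check_polygon([3, 7, 13, 2]) is False  # 3 + 7 + 2 < 13, so they cannot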
319
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works

from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    """pipelines_utils""",
    """0.22.0""",
    """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
    standard_warn=False,
    stacklevel=3,
)
319
1
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

snake_case = logging.get_logger(__name__)


snake_case = Dict[str, Any]
snake_case = List[Prediction]


@add_end_docstrings(lowerCAmelCase )
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    '''simple docstring'''

    def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ):
        super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )

        if self.framework == "tf":
            raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )

        requires_backends(self , "vision" )
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )

    def _A ( self : str , **UpperCAmelCase_ : Dict ):
        SCREAMING_SNAKE_CASE : Dict = {}
        if "threshold" in kwargs:
            SCREAMING_SNAKE_CASE : str = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__( self : List[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : int ):
        return super().__call__(*UpperCAmelCase_ , **UpperCAmelCase_ )

    def _A ( self : Any , UpperCAmelCase_ : Any ):
        SCREAMING_SNAKE_CASE : List[str] = load_image(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : int = torch.IntTensor([[image.height, image.width]] )
        SCREAMING_SNAKE_CASE : List[str] = self.image_processor(images=[image] , return_tensors="pt" )
        if self.tokenizer is not None:
            SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
        SCREAMING_SNAKE_CASE : Union[str, Any] = target_size
        return inputs

    def _A ( self : str , UpperCAmelCase_ : Tuple ):
        SCREAMING_SNAKE_CASE : str = model_inputs.pop("target_size" )
        SCREAMING_SNAKE_CASE : Any = self.model(**UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Optional[int] = outputs.__class__({"target_size": target_size, **outputs} )
        if self.tokenizer is not None:
            SCREAMING_SNAKE_CASE : int = model_inputs["bbox"]
        return model_outputs

    def _A ( self : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str]=0.9 ):
        SCREAMING_SNAKE_CASE : Dict = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = target_size[0].tolist()

            def unnormalize(UpperCAmelCase_ : List[Any] ):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ] ) )

            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            SCREAMING_SNAKE_CASE : Dict = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
            SCREAMING_SNAKE_CASE : List[Any] = [unnormalize(UpperCAmelCase_ ) for bbox in model_outputs["bbox"].squeeze(0 )]
            SCREAMING_SNAKE_CASE : List[Any] = ["score", "label", "box"]
            SCREAMING_SNAKE_CASE : Union[str, Any] = [
                dict(zip(UpperCAmelCase_ , UpperCAmelCase_ ) )
                for vals in zip(scores.tolist() , UpperCAmelCase_ , UpperCAmelCase_ )
                if vals[0] > threshold
            ]
        else:
            # This is a regular ForObjectDetectionModel
            SCREAMING_SNAKE_CASE : List[Any] = self.image_processor.post_process_object_detection(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
            SCREAMING_SNAKE_CASE : List[Any] = raw_annotations[0]
            SCREAMING_SNAKE_CASE : List[str] = raw_annotation["scores"]
            SCREAMING_SNAKE_CASE : Dict = raw_annotation["labels"]
            SCREAMING_SNAKE_CASE : Dict = raw_annotation["boxes"]

            SCREAMING_SNAKE_CASE : List[Any] = scores.tolist()
            SCREAMING_SNAKE_CASE : str = [self.model.config.idalabel[label.item()] for label in labels]
            SCREAMING_SNAKE_CASE : Dict = [self._get_bounding_box(UpperCAmelCase_ ) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            SCREAMING_SNAKE_CASE : Any = ["score", "label", "box"]
            SCREAMING_SNAKE_CASE : str = [
                dict(zip(UpperCAmelCase_ , UpperCAmelCase_ ) )
                for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
            ]

        return annotation

    def _A ( self : Optional[Any] , UpperCAmelCase_ : "torch.Tensor" ):
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )

        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = box.int().tolist()
        SCREAMING_SNAKE_CASE : List[Any] = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
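# --- Aside: a hedged usage sketch for the pipeline above. The task string
# "object-detection" and the checkpoint "facebook/detr-resnet-50" are real, but this
# snippet is illustrative, not part of the file; it downloads weights when run, and
# the prediction values shown in the comment are examples only.
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
preds = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
# each prediction is a dict shaped like:
# {"score": 0.99, "label": "cat", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}
print(preds[0]["label"], preds[0]["box"])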
319
import argparse
import json
import os

import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image

from transformers import (
    EfficientNetConfig,
    EfficientNetForImageClassification,
    EfficientNetImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)

snake_case = {
    """b0""": efficientnet.EfficientNetBa,
    """b1""": efficientnet.EfficientNetBa,
    """b2""": efficientnet.EfficientNetBa,
    """b3""": efficientnet.EfficientNetBa,
    """b4""": efficientnet.EfficientNetBa,
    """b5""": efficientnet.EfficientNetBa,
    """b6""": efficientnet.EfficientNetBa,
    """b7""": efficientnet.EfficientNetBa,
}

snake_case = {
    """b0""": {
        """hidden_dim""": 1_280,
        """width_coef""": 1.0,
        """depth_coef""": 1.0,
        """image_size""": 224,
        """dropout_rate""": 0.2,
        """dw_padding""": [],
    },
    """b1""": {
        """hidden_dim""": 1_280,
        """width_coef""": 1.0,
        """depth_coef""": 1.1,
        """image_size""": 240,
        """dropout_rate""": 0.2,
        """dw_padding""": [16],
    },
    """b2""": {
        """hidden_dim""": 1_408,
        """width_coef""": 1.1,
        """depth_coef""": 1.2,
        """image_size""": 260,
        """dropout_rate""": 0.3,
        """dw_padding""": [5, 8, 16],
    },
    """b3""": {
        """hidden_dim""": 1_536,
        """width_coef""": 1.2,
        """depth_coef""": 1.4,
        """image_size""": 300,
        """dropout_rate""": 0.3,
        """dw_padding""": [5, 18],
    },
    """b4""": {
        """hidden_dim""": 1_792,
        """width_coef""": 1.4,
        """depth_coef""": 1.8,
        """image_size""": 380,
        """dropout_rate""": 0.4,
        """dw_padding""": [6],
    },
    """b5""": {
        """hidden_dim""": 2_048,
        """width_coef""": 1.6,
        """depth_coef""": 2.2,
        """image_size""": 456,
        """dropout_rate""": 0.4,
        """dw_padding""": [13, 27],
    },
    """b6""": {
        """hidden_dim""": 2_304,
        """width_coef""": 1.8,
        """depth_coef""": 2.6,
        """image_size""": 528,
        """dropout_rate""": 0.5,
        """dw_padding""": [31],
    },
    """b7""": {
        """hidden_dim""": 2_560,
        """width_coef""": 2.0,
        """depth_coef""": 3.1,
        """image_size""": 600,
        """dropout_rate""": 0.5,
        """dw_padding""": [18],
    },
}


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : str = EfficientNetConfig()
    SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["hidden_dim"]
    SCREAMING_SNAKE_CASE : Tuple = CONFIG_MAP[model_name]["width_coef"]
    SCREAMING_SNAKE_CASE : Optional[int] = CONFIG_MAP[model_name]["depth_coef"]
    SCREAMING_SNAKE_CASE : Union[str, Any] = CONFIG_MAP[model_name]["image_size"]
    SCREAMING_SNAKE_CASE : Any = CONFIG_MAP[model_name]["dropout_rate"]
    SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["dw_padding"]

    SCREAMING_SNAKE_CASE : str = "huggingface/label-files"
    SCREAMING_SNAKE_CASE : str = "imagenet-1k-id2label.json"
    SCREAMING_SNAKE_CASE : str = 1000
    SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) )
    SCREAMING_SNAKE_CASE : Tuple = {int(lowercase ): v for k, v in idalabel.items()}

    SCREAMING_SNAKE_CASE : Union[str, Any] = idalabel
    SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in idalabel.items()}
    return config


def lowerCamelCase__ ( ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
    SCREAMING_SNAKE_CASE : List[Any] = Image.open(requests.get(lowercase , stream=lowercase ).raw )
    return im


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"]
    SCREAMING_SNAKE_CASE : int = EfficientNetImageProcessor(
        size={"height": size, "width": size} ,
        image_mean=[0.485, 0.456, 0.406] ,
        image_std=[0.47853944, 0.4732864, 0.47434163] ,
        do_center_crop=lowercase ,
    )
    return preprocessor


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Any = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
    SCREAMING_SNAKE_CASE : List[str] = sorted(set(lowercase ) )
    SCREAMING_SNAKE_CASE : List[str] = len(lowercase )
    SCREAMING_SNAKE_CASE : Optional[int] = {b: str(lowercase ) for b, i in zip(lowercase , range(lowercase ) )}

    SCREAMING_SNAKE_CASE : Dict = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )

    for b in block_names:
        SCREAMING_SNAKE_CASE : Tuple = block_name_mapping[b]
        rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
        rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
        rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
        rename_keys.append(
            (F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
        rename_keys.append(
            (F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
        rename_keys.append(
            (F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
        rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
        rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
        rename_keys.append(
            (F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
        rename_keys.append(
            (F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
        rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
        rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
        rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
        rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
        rename_keys.append(
            (F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
        rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
        rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
        rename_keys.append(
            (F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
        rename_keys.append(
            (F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )

    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )

    SCREAMING_SNAKE_CASE : int = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            SCREAMING_SNAKE_CASE : Any = "efficientnet." + item[1]

    SCREAMING_SNAKE_CASE : Optional[Any] = "classifier.weight"
    SCREAMING_SNAKE_CASE : List[str] = "classifier.bias"

    return key_mapping


def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
    """simple docstring"""
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        SCREAMING_SNAKE_CASE : str = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowercase ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(np.transpose(lowercase ) )
        else:
            SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase )

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(lowercase )


@torch.no_grad()
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Optional[int] = model_classes[model_name](
        include_top=lowercase ,
        weights="imagenet" ,
        input_tensor=lowercase ,
        input_shape=lowercase ,
        pooling=lowercase ,
        classes=1000 ,
        classifier_activation="softmax" ,
    )

    SCREAMING_SNAKE_CASE : List[Any] = original_model.trainable_variables
    SCREAMING_SNAKE_CASE : Dict = original_model.non_trainable_variables
    SCREAMING_SNAKE_CASE : Dict = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        SCREAMING_SNAKE_CASE : Tuple = param.numpy()
    SCREAMING_SNAKE_CASE : Tuple = list(tf_params.keys() )

    # Load HuggingFace model
    SCREAMING_SNAKE_CASE : Tuple = get_efficientnet_config(lowercase )
    SCREAMING_SNAKE_CASE : str = EfficientNetForImageClassification(lowercase ).eval()
    SCREAMING_SNAKE_CASE : Dict = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters..." )
    SCREAMING_SNAKE_CASE : Dict = rename_keys(lowercase )
    replace_params(lowercase , lowercase , lowercase )

    # Initialize preprocessor and preprocess input image
    SCREAMING_SNAKE_CASE : Optional[int] = convert_image_processor(lowercase )
    SCREAMING_SNAKE_CASE : int = preprocessor(images=prepare_img() , return_tensors="pt" )

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        SCREAMING_SNAKE_CASE : List[str] = hf_model(**lowercase )
    SCREAMING_SNAKE_CASE : Optional[int] = outputs.logits.detach().numpy()

    # Original model inference
    SCREAMING_SNAKE_CASE : int = False
    SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"]
    SCREAMING_SNAKE_CASE : Any = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    SCREAMING_SNAKE_CASE : Tuple = image.img_to_array(lowercase )
    SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(lowercase , axis=0 )
    SCREAMING_SNAKE_CASE : Any = original_model.predict(lowercase )

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(lowercase , lowercase , atol=1E-3 ), "The predicted logits are not the same."
    print("Model outputs match!" )

    if save_model:
        # Create folder to save model
        if not os.path.isdir(lowercase ):
            os.mkdir(lowercase )
        # Save converted model and image processor
        hf_model.save_pretrained(lowercase )
        preprocessor.save_pretrained(lowercase )

    if push_to_hub:
        # Push model and image processor to hub
        print(F'''Pushing converted {model_name} to the hub...''' )
        SCREAMING_SNAKE_CASE : Union[str, Any] = F'''efficientnet-{model_name}'''
        preprocessor.push_to_hub(lowercase )
        hf_model.push_to_hub(lowercase )


if __name__ == "__main__":
    snake_case = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""b0""",
        type=str,
        help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default="""hf_model""",
        type=str,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")

    snake_case = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
319
1
from functools import lru_cache


@lru_cache
def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    if num < 0:
        raise ValueError("Number should not be negative." )

    return 1 if num in (0, 1) else num * factorial(num - 1 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
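# --- Aside: why the @lru_cache matters above, as a standalone sketch (the name
# `fact` is an assumption standing in for the generated `lowerCamelCase__`): every
# recursive call is memoized, so later calls are served straight from the cache.
from functools import lru_cache


@lru_cache
def fact(n: int) -> int:
    return 1 if n in (0, 1) else n * fact(n - 1)


fact(100)  # fills the cache for all n <= 100
assert fact(99) == fact(100) // 100  # both lookups now hit the cache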
319
def lowerCamelCase__ ( ):
    """simple docstring"""
    return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]


snake_case = generate_large_matrix()
snake_case = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    assert all(row == sorted(lowercase , reverse=lowercase ) for row in grid )
    assert all(list(lowercase ) == sorted(lowercase , reverse=lowercase ) for col in zip(*lowercase ) )


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : int = 0
    SCREAMING_SNAKE_CASE : Optional[Any] = len(lowercase ) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        SCREAMING_SNAKE_CASE : List[Any] = (left + right) // 2
        SCREAMING_SNAKE_CASE : Optional[int] = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            SCREAMING_SNAKE_CASE : List[Any] = mid + 1
        else:
            SCREAMING_SNAKE_CASE : Dict = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(lowercase )


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Tuple = 0
    SCREAMING_SNAKE_CASE : List[str] = len(grid[0] )

    for i in range(len(lowercase ) ):
        SCREAMING_SNAKE_CASE : Any = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(lowercase ) * len(grid[0] )) - total


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    return len([number for row in grid for number in row if number < 0] )


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Any = 0
    for row in grid:
        for i, number in enumerate(lowercase ):
            if number < 0:
                total += len(lowercase ) - i
                break
    return total


def lowerCamelCase__ ( ):
    """simple docstring"""
    from timeit import timeit

    print("Running benchmarks" )
    SCREAMING_SNAKE_CASE : List[str] = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        SCREAMING_SNAKE_CASE : Union[str, Any] = timeit(F'''{func}(grid=grid)''' , setup=lowercase , number=500 )
        print(F'''{func}() took {time:0.4f} seconds''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
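# --- Aside: a standalone rendering of the row-wise binary search above. In a grid
# whose rows and columns are non-increasing, each row's negatives form a suffix, so
# one O(log n) search per row counts all negatives in O(m log n) overall.
def first_negative_index(row: list[int]) -> int:
    lo, hi = 0, len(row)
    while lo < hi:
        mid = (lo + hi) // 2
        if row[mid] >= 0:
            lo = mid + 1
        else:
            hi = mid
    return lo  # == number of non-negative entries


demo_grid = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
assert sum(len(row) - first_negative_index(row) for row in demo_grid) == 8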
319
1
def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    if not isinstance(lowercase , lowercase ):
        raise TypeError("only integers accepted as input" )
    else:
        SCREAMING_SNAKE_CASE : Dict = str(abs(lowercase ) )
        SCREAMING_SNAKE_CASE : Optional[int] = [list(lowercase ) for char in range(len(lowercase ) )]
        for index in range(len(lowercase ) ):
            num_transpositions[index].pop(lowercase )
        return max(
            int("".join(list(lowercase ) ) ) for transposition in num_transpositions )


if __name__ == "__main__":
    __import__("""doctest""").testmod()
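# --- Aside: what the function above computes, as a standalone sketch (assumed
# behavior: the largest number obtainable from `num` by deleting exactly one digit;
# the sign is dropped via abs()).
def max_after_deleting_one_digit(num: int) -> int:
    digits = str(abs(num))
    return max(int(digits[:i] + digits[i + 1 :]) for i in range(len(digits)))


assert max_after_deleting_one_digit(1432) == 432  # candidates: 432, 132, 142, 143
assert max_after_deleting_one_digit(-10) == 1  # candidates: 0, 1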
319
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


snake_case = ["""small""", """medium""", """large"""]

snake_case = """lm_head.decoder.weight"""
snake_case = """lm_head.weight"""


def lowerCamelCase__ ( lowercase , lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Tuple = torch.load(lowercase )
    SCREAMING_SNAKE_CASE : Any = d.pop(lowercase )
    os.makedirs(lowercase , exist_ok=lowercase )
    torch.save(lowercase , os.path.join(lowercase , lowercase ) )


if __name__ == "__main__":
    snake_case = argparse.ArgumentParser()
    parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
    snake_case = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        snake_case = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""")
        snake_case = F"""./DialoGPT-{MODEL}"""
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
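# --- Aside: the conversion above boils down to renaming one key in a pickled state
# dict. A minimal standalone sketch of that single operation (the tensor values and
# the extra "wte.weight" key are dummies for illustration):
import torch

state = {"lm_head.decoder.weight": torch.zeros(2, 2), "wte.weight": torch.ones(2, 2)}
state["lm_head.weight"] = state.pop("lm_head.decoder.weight")
assert "lm_head.decoder.weight" not in state and "lm_head.weight" in state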
319
1
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
snake_case = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    config.addinivalue_line(
        "markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
    config.addinivalue_line(
        "markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
    config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
    config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
    config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
    config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(lowercase )


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    from transformers.testing_utils import pytest_terminal_summary_main

    SCREAMING_SNAKE_CASE : int = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(lowercase , id=lowercase )


def lowerCamelCase__ ( lowercase , lowercase ):
    """simple docstring"""
    if exitstatus == 5:
        SCREAMING_SNAKE_CASE : List[Any] = 0


# Doctest custom flag to ignore output.
snake_case = doctest.register_optionflag("""IGNORE_RESULT""")

snake_case = doctest.OutputChecker


class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    '''simple docstring'''

    def _A ( self : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any ):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )


snake_case = CustomOutputChecker

snake_case = HfDoctestModule
snake_case = HfDocTestParser
319
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


snake_case = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case = ["""MLukeTokenizer"""]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
319
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


snake_case = logging.get_logger(__name__)


class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    '''simple docstring'''

    UpperCamelCase_ : Dict = '''timm_backbone'''

    def __init__( self : List[Any] , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : Optional[Any] , ):
        super().__init__(**UpperCAmelCase_ )

        SCREAMING_SNAKE_CASE : Dict = backbone
        SCREAMING_SNAKE_CASE : List[str] = num_channels
        SCREAMING_SNAKE_CASE : Optional[Any] = features_only
        SCREAMING_SNAKE_CASE : Dict = use_pretrained_backbone
        SCREAMING_SNAKE_CASE : Optional[int] = True
        SCREAMING_SNAKE_CASE : List[Any] = out_indices if out_indices is not None else (-1,)
319
def lowerCamelCase__ ( lowercase , lowercase ):
    """simple docstring"""
    return int((input_a, input_a).count(1 ) != 0 )


def lowerCamelCase__ ( ):
    """simple docstring"""
    assert or_gate(0 , 0 ) == 0
    assert or_gate(0 , 1 ) == 1
    assert or_gate(1 , 0 ) == 1
    assert or_gate(1 , 1 ) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
319
1
from collections.abc import Iterable
from typing import Generic, TypeVar

snake_case = TypeVar("""_T""")


class SCREAMING_SNAKE_CASE ( Generic[_T] ):
    '''simple docstring'''

    def __init__( self : Tuple , UpperCAmelCase_ : Iterable[_T] | None = None ):
        SCREAMING_SNAKE_CASE : list[_T] = list(iterable or [] )
        SCREAMING_SNAKE_CASE : list[_T] = []

    def __len__( self : Dict ):
        return len(self._stacka ) + len(self._stacka )

    def __repr__( self : str ):
        return f'''Queue({tuple(self._stacka[::-1] + self._stacka )})'''

    def _A ( self : List[Any] , UpperCAmelCase_ : _T ):
        self._stacka.append(UpperCAmelCase_ )

    def _A ( self : List[Any] ):
        SCREAMING_SNAKE_CASE : str = self._stacka.pop
        SCREAMING_SNAKE_CASE : Tuple = self._stacka.append

        if not self._stacka:
            while self._stacka:
                stacka_append(stacka_pop() )

        if not self._stacka:
            raise IndexError("Queue is empty" )

        return self._stacka.pop()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
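# --- Aside: a de-obfuscated usage sketch of the two-stack queue above. Pushes land
# on an inbox stack; pops drain the inbox into an outbox only when the outbox is
# empty, which reverses the order exactly once and gives amortized O(1) FIFO ops.
class TwoStackQueue:
    def __init__(self):
        self._inbox, self._outbox = [], []

    def put(self, item):
        self._inbox.append(item)

    def get(self):
        if not self._outbox:
            while self._inbox:
                self._outbox.append(self._inbox.pop())
        if not self._outbox:
            raise IndexError("Queue is empty")
        return self._outbox.pop()


q = TwoStackQueue()
for x in (1, 2, 3):
    q.put(x)
assert [q.get(), q.get(), q.get()] == [1, 2, 3]  # FIFO order preserved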
319
class SCREAMING_SNAKE_CASE :
    '''simple docstring'''

    def __init__( self : Union[str, Any] , UpperCAmelCase_ : list ):
        SCREAMING_SNAKE_CASE : Union[str, Any] = set_counts
        SCREAMING_SNAKE_CASE : Any = max(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Any = len(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : List[str] = [1] * num_sets
        SCREAMING_SNAKE_CASE : List[str] = list(range(UpperCAmelCase_ ) )

    def _A ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
        SCREAMING_SNAKE_CASE : List[Any] = self.get_parent(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : List[str] = self.get_parent(UpperCAmelCase_ )

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            SCREAMING_SNAKE_CASE : Dict = 0
            SCREAMING_SNAKE_CASE : Union[str, Any] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            SCREAMING_SNAKE_CASE : List[str] = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            SCREAMING_SNAKE_CASE : Optional[int] = 0
            SCREAMING_SNAKE_CASE : Tuple = src_parent
            SCREAMING_SNAKE_CASE : Optional[int] = self.set_counts[src_parent]

        SCREAMING_SNAKE_CASE : Optional[Any] = max(self.max_set , UpperCAmelCase_ )
        return True

    def _A ( self : Tuple , UpperCAmelCase_ : int ):
        if self.parents[disj_set] == disj_set:
            return disj_set
        SCREAMING_SNAKE_CASE : Tuple = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set]
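# --- Aside: a standalone rendering of the union-find above with the de-obfuscated
# names I am assuming (merge == union by rank, find == path-compressing lookup);
# `set_counts` seeds each set's size and `max_set` tracks the largest merged set.
class DisjointSetWithCounts:
    def __init__(self, set_counts):
        self.set_counts = list(set_counts)
        self.max_set = max(set_counts)
        self.ranks = [1] * len(set_counts)
        self.parents = list(range(len(set_counts)))

    def find(self, x):
        if self.parents[x] != x:
            self.parents[x] = self.find(self.parents[x])
        return self.parents[x]

    def merge(self, src, dst):
        src_root, dst_root = self.find(src), self.find(dst)
        if src_root == dst_root:
            return False
        if self.ranks[dst_root] < self.ranks[src_root]:
            src_root, dst_root = dst_root, src_root  # higher-rank root absorbs
        self.set_counts[dst_root] += self.set_counts[src_root]
        self.set_counts[src_root] = 0
        self.parents[src_root] = dst_root
        if self.ranks[dst_root] == self.ranks[src_root]:
            self.ranks[dst_root] += 1
        self.max_set = max(self.max_set, self.set_counts[dst_root])
        return True


ds = DisjointSetWithCounts([1, 1, 1])
ds.merge(0, 1)
ds.merge(1, 2)
assert ds.max_set == 3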
319
1
from __future__ import annotations

from collections.abc import Callable

snake_case = list[list[float | int]]


def lowerCamelCase__ ( lowercase , lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : int = len(lowercase )
    SCREAMING_SNAKE_CASE : Matrix = [[0 for _ in range(size + 1 )] for _ in range(lowercase )]
    SCREAMING_SNAKE_CASE : int
    SCREAMING_SNAKE_CASE : int
    SCREAMING_SNAKE_CASE : int
    SCREAMING_SNAKE_CASE : int
    SCREAMING_SNAKE_CASE : int
    SCREAMING_SNAKE_CASE : float

    for row in range(lowercase ):
        for col in range(lowercase ):
            SCREAMING_SNAKE_CASE : Optional[int] = matrix[row][col]

        SCREAMING_SNAKE_CASE : str = vector[row][0]

    SCREAMING_SNAKE_CASE : Optional[Any] = 0
    SCREAMING_SNAKE_CASE : List[str] = 0
    while row < size and col < size:
        # pivoting
        SCREAMING_SNAKE_CASE : Any = max((abs(augmented[rowa][col] ), rowa) for rowa in range(lowercase , lowercase ) )[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = augmented[pivot_row], augmented[row]

        for rowa in range(row + 1 , lowercase ):
            SCREAMING_SNAKE_CASE : Union[str, Any] = augmented[rowa][col] / augmented[row][col]
            SCREAMING_SNAKE_CASE : List[Any] = 0
            for cola in range(col + 1 , size + 1 ):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1 , lowercase ):
        for row in range(lowercase ):
            SCREAMING_SNAKE_CASE : str = augmented[row][col] / augmented[col][col]
            for cola in range(lowercase , size + 1 ):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(lowercase )
    ]


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : int = len(lowercase )
    SCREAMING_SNAKE_CASE : Matrix = [[0 for _ in range(lowercase )] for _ in range(lowercase )]
    SCREAMING_SNAKE_CASE : Matrix = [[0] for _ in range(lowercase )]
    SCREAMING_SNAKE_CASE : Matrix
    SCREAMING_SNAKE_CASE : int
    SCREAMING_SNAKE_CASE : int
    SCREAMING_SNAKE_CASE : int

    for x_val, y_val in enumerate(lowercase ):
        for col in range(lowercase ):
            SCREAMING_SNAKE_CASE : Tuple = (x_val + 1) ** (size - col - 1)
        SCREAMING_SNAKE_CASE : List[Any] = y_val
    SCREAMING_SNAKE_CASE : List[Any] = solve(lowercase , lowercase )

    def interpolated_func(lowercase ) -> int:
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(lowercase ) )

    return interpolated_func


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def lowerCamelCase__ ( lowercase = question_function , lowercase = 10 ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : list[int] = [func(lowercase ) for x_val in range(1 , order + 1 )]
    SCREAMING_SNAKE_CASE : list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    SCREAMING_SNAKE_CASE : int = 0
    SCREAMING_SNAKE_CASE : Callable[[int], int]
    SCREAMING_SNAKE_CASE : int

    for poly in polynomials:
        SCREAMING_SNAKE_CASE : Any = 1
        while func(lowercase ) == poly(lowercase ):
            x_val += 1
        ret += poly(lowercase )

    return ret


if __name__ == "__main__":
    print(F"""{solution() = }""")
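# --- Aside: a quick external cross-check of the Gaussian-elimination `solve` above
# on the 2x2 system x + y = 3, 2x - y = 0 (solution x = 1, y = 2), using numpy as an
# independent reference implementation (an assumption for illustration only):
import numpy as np

a = np.array([[1.0, 1.0], [2.0, -1.0]])
b = np.array([3.0, 0.0])
print(np.linalg.solve(a, b))  # -> [1. 2.], matching solve([[1, 1], [2, -1]], [[3], [0]])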
319
from ...configuration_utils import PretrainedConfig
from ...utils import logging


snake_case = logging.get_logger(__name__)


class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    '''simple docstring'''

    UpperCamelCase_ : Dict = '''timm_backbone'''

    def __init__( self : List[Any] , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : Optional[Any] , ):
        super().__init__(**UpperCAmelCase_ )

        SCREAMING_SNAKE_CASE : Dict = backbone
        SCREAMING_SNAKE_CASE : List[str] = num_channels
        SCREAMING_SNAKE_CASE : Optional[Any] = features_only
        SCREAMING_SNAKE_CASE : Dict = use_pretrained_backbone
        SCREAMING_SNAKE_CASE : Optional[int] = True
        SCREAMING_SNAKE_CASE : List[Any] = out_indices if out_indices is not None else (-1,)
319
1
import unittest

from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


snake_case = get_tests_dir("""fixtures/test_sentencepiece.model""")


@require_sentencepiece
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , unittest.TestCase ):
    '''simple docstring'''

    UpperCamelCase_ : Union[str, Any] = XLMProphetNetTokenizer
    UpperCamelCase_ : str = False
    UpperCamelCase_ : Optional[Any] = True

    def _A ( self : Tuple ):
        super().setUp()

        # We have a SentencePiece fixture for testing
        SCREAMING_SNAKE_CASE : List[str] = XLMProphetNetTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ )
        tokenizer.save_pretrained(self.tmpdirname )

    def _A ( self : List[str] ):
        SCREAMING_SNAKE_CASE : List[Any] = "[PAD]"
        SCREAMING_SNAKE_CASE : Tuple = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )

    def _A ( self : str ):
        SCREAMING_SNAKE_CASE : str = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , "[PAD]" )
        self.assertEqual(vocab_keys[1] , "[CLS]" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(UpperCAmelCase_ ) , 1012 )

    def _A ( self : Tuple ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1012 )

    def _A ( self : Optional[int] ):
        SCREAMING_SNAKE_CASE : Optional[Any] = XLMProphetNetTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ )

        SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize("This is a test" )
        self.assertListEqual(UpperCAmelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) ,
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,
        )

        SCREAMING_SNAKE_CASE : Dict = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            UpperCAmelCase_ ,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] ,
        )
        SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
        self.assertListEqual(
            UpperCAmelCase_ ,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ] ,
        )

        SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
        self.assertListEqual(
            UpperCAmelCase_ ,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "[UNK]",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "[UNK]",
                ".",
            ] ,
        )

    @cached_property
    def _A ( self : str ):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )

    @slow
    def _A ( self : List[Any] ):
        SCREAMING_SNAKE_CASE : int = "Hello World!"
        SCREAMING_SNAKE_CASE : Optional[Any] = [3_5389, 6672, 49, 2]
        self.assertListEqual(UpperCAmelCase_ , self.big_tokenizer.encode(UpperCAmelCase_ ) )

    @slow
    def _A ( self : Dict ):
        # fmt: off
        SCREAMING_SNAKE_CASE : Tuple = {"input_ids": [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase_ ,
            model_name="microsoft/xprophetnet-large-wiki100-cased" ,
            revision="1acad1643ddd54a44df6a1b797ada8373685d90e" ,
        )
319
from math import sqrt


def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Optional[Any] = 0
    for i in range(1 , int(sqrt(lowercase ) + 1 ) ):
        if n % i == 0 and i != sqrt(lowercase ):
            total += i + n // i
        elif i == sqrt(lowercase ):
            total += i
    return total - n


def lowerCamelCase__ ( lowercase = 10000 ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Dict = sum(
        i
        for i in range(1 , lowercase )
        if sum_of_divisors(sum_of_divisors(lowercase ) ) == i and sum_of_divisors(lowercase ) != i )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
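# --- Aside: a worked check of the divisor-sum logic above on the classic amicable
# pair (220, 284): each number's proper divisors sum to the other, while perfect
# numbers (where d(n) == n) are excluded by the inequality in the solution.
def proper_divisor_sum(n: int) -> int:
    return sum(i for i in range(1, n) if n % i == 0)


assert proper_divisor_sum(220) == 284
assert proper_divisor_sum(284) == 220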
319
1
import itertools
import json
import os
import unittest

from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , unittest.TestCase ):
    '''simple docstring'''

    UpperCamelCase_ : Tuple = LongformerTokenizer
    UpperCamelCase_ : Optional[int] = True
    UpperCamelCase_ : Optional[Any] = LongformerTokenizerFast
    UpperCamelCase_ : int = True

    def _A ( self : Union[str, Any] ):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        SCREAMING_SNAKE_CASE : str = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        SCREAMING_SNAKE_CASE : List[Any] = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
        SCREAMING_SNAKE_CASE : Dict = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        SCREAMING_SNAKE_CASE : str = {"unk_token": "<unk>"}

        SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(UpperCAmelCase_ ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(UpperCAmelCase_ ) )

    def _A ( self : Any , **UpperCAmelCase_ : Optional[int] ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )

    def _A ( self : List[str] , **UpperCAmelCase_ : Tuple ):
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )

    def _A ( self : Tuple , UpperCAmelCase_ : Optional[int] ):
        SCREAMING_SNAKE_CASE : Optional[Any] = "lower newer"
        SCREAMING_SNAKE_CASE : int = "lower newer"
        return input_text, output_text

    def _A ( self : Optional[Any] ):
        SCREAMING_SNAKE_CASE : Any = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        SCREAMING_SNAKE_CASE : Tuple = "lower newer"
        SCREAMING_SNAKE_CASE : str = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.tokenize(UpperCAmelCase_ )  # , add_prefix_space=True)
        self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )

        SCREAMING_SNAKE_CASE : List[str] = tokens + [tokenizer.unk_token]
        SCREAMING_SNAKE_CASE : List[str] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , UpperCAmelCase_ )

    def _A ( self : List[str] ):
        SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=UpperCAmelCase_ ) , [0, 3_1414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=UpperCAmelCase_ ) ,
            [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] ,
        )

    @slow
    def _A ( self : Optional[int] ):
        SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )

        SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode("sequence builders" , add_special_tokens=UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=UpperCAmelCase_ )

        SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(
            "sequence builders" , add_special_tokens=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode(
            "sequence builders" , "multi-sequence build" , add_special_tokens=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ )

        SCREAMING_SNAKE_CASE : Tuple = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Dict = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def _A ( self : List[Any] ):
        SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()

        SCREAMING_SNAKE_CASE : Optional[Any] = "Encode this sequence."
        SCREAMING_SNAKE_CASE : Dict = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]

        # Testing encoder arguments
        SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(UpperCAmelCase_ , UpperCAmelCase_ )

        SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )

        tokenizer.add_special_tokens({"bos_token": "<s>"} )
        SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(UpperCAmelCase_ , UpperCAmelCase_ )

        # Testing spaces after special tokens
        SCREAMING_SNAKE_CASE : str = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ )} )  # mask token has a left space
        SCREAMING_SNAKE_CASE : List[str] = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )

        SCREAMING_SNAKE_CASE : Any = "Encode <mask> sequence"
        SCREAMING_SNAKE_CASE : Union[str, Any] = "Encode <mask>sequence"

        SCREAMING_SNAKE_CASE : int = tokenizer.encode(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Any = encoded.index(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )

        SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Union[str, Any] = encoded.index(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(UpperCAmelCase_ , UpperCAmelCase_ )

    def _A ( self : Tuple ):
        pass

    def _A ( self : Optional[Any] ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
                SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
                SCREAMING_SNAKE_CASE : str = "A, <mask> AllenNLP sentence."
                SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.encode_plus(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ )
                SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.encode_plus(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ )

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) ,
                    sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) ,
                )

                SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                SCREAMING_SNAKE_CASE : str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )

                self.assertSequenceEqual(
                    UpperCAmelCase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    UpperCAmelCase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )

    def _A ( self : Union[str, Any] ):
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            SCREAMING_SNAKE_CASE : Tuple = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ )
            SCREAMING_SNAKE_CASE : Dict = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            SCREAMING_SNAKE_CASE : Tuple = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state["add_prefix_space"] , UpperCAmelCase_ )
            self.assertEqual(post_processor_state["add_prefix_space"] , UpperCAmelCase_ )
            self.assertEqual(post_processor_state["trim_offsets"] , UpperCAmelCase_ )

    def _A ( self : Union[str, Any] ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                SCREAMING_SNAKE_CASE : Union[str, Any] = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                SCREAMING_SNAKE_CASE : List[str] = f'''{text_of_1_token} {text_of_1_token}'''

                SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained(
                    UpperCAmelCase_ , use_fast=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ )
                SCREAMING_SNAKE_CASE : str = tokenizer_r(UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,
                    (len(UpperCAmelCase_ ) + 1, len(UpperCAmelCase_ ) + 1 + len(UpperCAmelCase_ )) ,
                )

                SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(
                    UpperCAmelCase_ , use_fast=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ )
                SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r(UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,
                    (len(UpperCAmelCase_ ) + 1, len(UpperCAmelCase_ ) + 1 + len(UpperCAmelCase_ )) ,
                )

                SCREAMING_SNAKE_CASE : Tuple = self.rust_tokenizer_class.from_pretrained(
                    UpperCAmelCase_ , use_fast=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ )
                SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,
                    (len(UpperCAmelCase_ ), len(UpperCAmelCase_ ) + 1 + len(UpperCAmelCase_ )) ,
                )

                SCREAMING_SNAKE_CASE : Dict = self.rust_tokenizer_class.from_pretrained(
                    UpperCAmelCase_ , use_fast=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ )
                SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r(UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,
                    (len(UpperCAmelCase_ ), len(UpperCAmelCase_ ) + 1 + len(UpperCAmelCase_ )) ,
                )

                SCREAMING_SNAKE_CASE : Optional[Any] = f''' {text}'''

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
                    UpperCAmelCase_ , use_fast=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ )
                SCREAMING_SNAKE_CASE : Dict = tokenizer_r(UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCAmelCase_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,
                    (1 + len(UpperCAmelCase_ ) + 1, 1 + len(UpperCAmelCase_ ) + 1 + len(UpperCAmelCase_ )) ,
                )

                SCREAMING_SNAKE_CASE : Dict = self.rust_tokenizer_class.from_pretrained(
                    UpperCAmelCase_ , use_fast=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ )
                SCREAMING_SNAKE_CASE : Any = tokenizer_r(UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCAmelCase_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,
                    (1 + len(UpperCAmelCase_ ), 1 + len(UpperCAmelCase_ ) + 1 + len(UpperCAmelCase_ )) ,
                )

                SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
                    UpperCAmelCase_ , use_fast=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ )
                SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r(UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCAmelCase_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,
                    (1 + len(UpperCAmelCase_ ), 1 + len(UpperCAmelCase_ ) + 1 + len(UpperCAmelCase_ )) ,
                )
319
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


snake_case = {
    """configuration_encodec""": [
        """ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """EncodecConfig""",
    ],
    """feature_extraction_encodec""": ["""EncodecFeatureExtractor"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case = [
        """ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """EncodecModel""",
        """EncodecPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
319
1
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    PROCESSOR_MAPPING,
    TOKENIZER_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    AutoProcessor,
    AutoTokenizer,
    BertTokenizer,
    ProcessorMixin,
    WavaVecaConfig,
    WavaVecaFeatureExtractor,
    WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available


sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402
from test_module.custom_processing import CustomProcessor  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402


snake_case = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
snake_case = get_tests_dir("""fixtures/vocab.json""")
snake_case = get_tests_dir("""fixtures""")


class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''simple docstring'''

    UpperCamelCase_ : int = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']

    def _A ( self : Optional[Any] ):
        SCREAMING_SNAKE_CASE : Optional[int] = 0

    def _A ( self : Any ):
        SCREAMING_SNAKE_CASE : Union[str, Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
        self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )

    def _A ( self : Optional[int] ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            SCREAMING_SNAKE_CASE : Optional[int] = WavaVecaConfig()
            SCREAMING_SNAKE_CASE : List[str] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )

            # save in new folder
            model_config.save_pretrained(UpperCAmelCase_ )
            processor.save_pretrained(UpperCAmelCase_ )

            SCREAMING_SNAKE_CASE : Union[str, Any] = AutoProcessor.from_pretrained(UpperCAmelCase_ )

        self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )

    def _A ( self : Any ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) )
            copyfile(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , "vocab.json" ) )

            SCREAMING_SNAKE_CASE : List[str] = AutoProcessor.from_pretrained(UpperCAmelCase_ )

        self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )

    def _A ( self : Any ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            SCREAMING_SNAKE_CASE : List[Any] = WavaVecaFeatureExtractor()
            SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )

            SCREAMING_SNAKE_CASE : Union[str, Any] = WavaVecaProcessor(UpperCAmelCase_ , UpperCAmelCase_ )

            # save in new folder
            processor.save_pretrained(UpperCAmelCase_ )

            # drop `processor_class` in tokenizer
            with open(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , "r" ) as f:
                SCREAMING_SNAKE_CASE : List[Any] = json.load(UpperCAmelCase_ )
            config_dict.pop("processor_class" )

            with open(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , "w" ) as f:
                f.write(json.dumps(UpperCAmelCase_ ) )

            SCREAMING_SNAKE_CASE : List[str] = AutoProcessor.from_pretrained(UpperCAmelCase_ )

        self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )

    def _A ( self : int ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            SCREAMING_SNAKE_CASE : Optional[Any] = WavaVecaFeatureExtractor()
            SCREAMING_SNAKE_CASE : Union[str, Any] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )

            SCREAMING_SNAKE_CASE : Optional[int] = WavaVecaProcessor(UpperCAmelCase_ , UpperCAmelCase_ )

            # save in new folder
            processor.save_pretrained(UpperCAmelCase_ )

            # drop `processor_class` in feature extractor
            with open(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , "r" ) as f:
                SCREAMING_SNAKE_CASE : int = json.load(UpperCAmelCase_ )
            config_dict.pop("processor_class" )

            with open(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , "w" ) as f:
                f.write(json.dumps(UpperCAmelCase_ ) )

            SCREAMING_SNAKE_CASE : Union[str, Any] = AutoProcessor.from_pretrained(UpperCAmelCase_ )

        self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )

    def _A ( self : Union[str, Any] ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            SCREAMING_SNAKE_CASE : List[Any] = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
            model_config.save_pretrained(UpperCAmelCase_ )
            # copy relevant files
            copyfile(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , "vocab.json" ) )
            # create emtpy sample processor
            with open(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , "w" ) as f:
                f.write("{}" )

            SCREAMING_SNAKE_CASE : List[Any] = AutoProcessor.from_pretrained(UpperCAmelCase_ )

        self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )

    def _A ( self : List[str] ):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(UpperCAmelCase_ ):
            SCREAMING_SNAKE_CASE : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(UpperCAmelCase_ ):
            SCREAMING_SNAKE_CASE : Optional[Any] = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor" , trust_remote_code=UpperCAmelCase_ )

        SCREAMING_SNAKE_CASE : Any = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=UpperCAmelCase_ )
        self.assertTrue(processor.special_attribute_present )
        self.assertEqual(processor.__class__.__name__ , "NewProcessor" )

        SCREAMING_SNAKE_CASE : List[Any] = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present )
        self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )

        SCREAMING_SNAKE_CASE : str = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )

            # Test we can also load the slow version
            SCREAMING_SNAKE_CASE : List[str] = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor" , trust_remote_code=UpperCAmelCase_ , use_fast=UpperCAmelCase_ )
            SCREAMING_SNAKE_CASE : Optional[Any] = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present )
            self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )

    def _A ( self : List[str] ):
        try:
            AutoConfig.register("custom" , UpperCAmelCase_ )
            AutoFeatureExtractor.register(UpperCAmelCase_ , UpperCAmelCase_ )
            AutoTokenizer.register(UpperCAmelCase_ , slow_tokenizer_class=UpperCAmelCase_ )
            AutoProcessor.register(UpperCAmelCase_ , UpperCAmelCase_ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(UpperCAmelCase_ ):
                AutoProcessor.register(UpperCAmelCase_ , UpperCAmelCase_ )

            # Now that the config is registered, it can be used as any other config with the auto-API
            SCREAMING_SNAKE_CASE : Any = CustomFeatureExtractor.from_pretrained(UpperCAmelCase_ )

            with tempfile.TemporaryDirectory() as tmp_dir:
                SCREAMING_SNAKE_CASE : List[Any] = os.path.join(UpperCAmelCase_ , "vocab.txt" )
                with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
                SCREAMING_SNAKE_CASE : List[str] = CustomTokenizer(UpperCAmelCase_ )

            SCREAMING_SNAKE_CASE : Optional[int] = CustomProcessor(UpperCAmelCase_ , UpperCAmelCase_ )

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(UpperCAmelCase_ )
                SCREAMING_SNAKE_CASE : Union[str, Any] = AutoProcessor.from_pretrained(UpperCAmelCase_ )
                self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def _A ( self : Optional[Any] ):
        class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
            '''simple docstring'''

            UpperCamelCase_ : int = False

        class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
            '''simple docstring'''

            UpperCamelCase_ : Optional[int] = False

        class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
            '''simple docstring'''

            UpperCamelCase_ : Any = '''AutoFeatureExtractor'''
            UpperCamelCase_ : Any = '''AutoTokenizer'''
            UpperCamelCase_ : List[Any] = False

        try:
            AutoConfig.register("custom" , UpperCAmelCase_ )
            AutoFeatureExtractor.register(UpperCAmelCase_ , UpperCAmelCase_ )
            AutoTokenizer.register(UpperCAmelCase_ , slow_tokenizer_class=UpperCAmelCase_ )
            AutoProcessor.register(UpperCAmelCase_ , UpperCAmelCase_ )
            # If remote code is not set, the default is to use local classes.
            SCREAMING_SNAKE_CASE : List[Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
            self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
            self.assertFalse(processor.special_attribute_present )
            self.assertFalse(processor.feature_extractor.special_attribute_present )
            self.assertFalse(processor.tokenizer.special_attribute_present )

            # If remote code is disabled, we load the local ones.
            SCREAMING_SNAKE_CASE : Optional[Any] = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor" , trust_remote_code=UpperCAmelCase_ )
            self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
            self.assertFalse(processor.special_attribute_present )
            self.assertFalse(processor.feature_extractor.special_attribute_present )
            self.assertFalse(processor.tokenizer.special_attribute_present )

            # If remote is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoProcessor.from_pretrained( "hf-internal-testing/test_dynamic_processor" , trust_remote_code=UpperCAmelCase_ ) self.assertEqual(processor.__class__.__name__ , "NewProcessor" ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def _A ( self : Any ): SCREAMING_SNAKE_CASE : Optional[Any] = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" ) self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" ) def _A ( self : Dict ): SCREAMING_SNAKE_CASE : Any = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" ) self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" ) @is_staging_test class SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Tuple = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def _A ( cls : Tuple ): SCREAMING_SNAKE_CASE : Optional[Any] = TOKEN HfFolder.save_token(UpperCAmelCase_ ) @classmethod def _A ( cls : Tuple ): try: delete_repo(token=cls._token , repo_id="test-processor" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-processor" ) except HTTPError: pass def _A ( self : List[str] ): SCREAMING_SNAKE_CASE : Union[str, Any] = WavaVecaProcessor.from_pretrained(UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(UpperCAmelCase_ , "test-processor" ) , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token ) SCREAMING_SNAKE_CASE : Optional[int] = WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(UpperCAmelCase_ , getattr(new_processor.feature_extractor , UpperCAmelCase_ ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def _A ( self : Tuple ): SCREAMING_SNAKE_CASE : Optional[Any] = WavaVecaProcessor.from_pretrained(UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(UpperCAmelCase_ , "test-processor-org" ) , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token , organization="valid_org" , ) SCREAMING_SNAKE_CASE : Tuple = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(UpperCAmelCase_ , getattr(new_processor.feature_extractor , UpperCAmelCase_ ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def _A ( self : Optional[int] ): CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() SCREAMING_SNAKE_CASE : Union[str, Any] = CustomFeatureExtractor.from_pretrained(UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: 
SCREAMING_SNAKE_CASE : List[str] = os.path.join(UpperCAmelCase_ , "vocab.txt" ) with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) SCREAMING_SNAKE_CASE : Any = CustomTokenizer(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = CustomProcessor(UpperCAmelCase_ , UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token ) SCREAMING_SNAKE_CASE : Dict = Repository(UpperCAmelCase_ , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token ) processor.save_pretrained(UpperCAmelCase_ ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor", "AutoProcessor": "custom_processing.CustomProcessor", } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(UpperCAmelCase_ , "tokenizer_config.json" ) ) as f: SCREAMING_SNAKE_CASE : Union[str, Any] = json.load(UpperCAmelCase_ ) self.assertDictEqual( tokenizer_config["auto_map"] , { "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None], "AutoProcessor": "custom_processing.CustomProcessor", } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase_ , "custom_feature_extraction.py" ) ) ) self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase_ , "custom_tokenization.py" ) ) ) self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase_ , "custom_processing.py" ) ) ) repo.push_to_hub() SCREAMING_SNAKE_CASE : Dict = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , trust_remote_code=UpperCAmelCase_ ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: snake_case = None snake_case = logging.get_logger(__name__) snake_case = """▁""" snake_case = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} snake_case = { """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}, """tokenizer_file""": { """google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json""" }, } snake_case = { """google/pegasus-xsum""": 512, } class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES UpperCamelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int = PegasusTokenizer UpperCamelCase_ : str = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Optional[int]="<pad>" , UpperCAmelCase_ : int="</s>" , UpperCAmelCase_ : str="<unk>" , UpperCAmelCase_ : str="<mask_2>" , UpperCAmelCase_ : Optional[int]="<mask_1>" , UpperCAmelCase_ : int=None , UpperCAmelCase_ : str=103 , **UpperCAmelCase_ : Optional[int] , ): SCREAMING_SNAKE_CASE : Optional[Any] = offset if additional_special_tokens is not None: if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): raise TypeError( f'''additional_special_tokens should be of type {type(UpperCAmelCase_ )}, but is''' f''' {type(UpperCAmelCase_ )}''' ) SCREAMING_SNAKE_CASE : Optional[Any] = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'''<unk_{i}>''' for i in range(len(UpperCAmelCase_ ) , self.offset - 1 ) ] if len(set(UpperCAmelCase_ ) ) != len(UpperCAmelCase_ ): raise ValueError( "Please make sure that the provided additional_special_tokens do not contain an incorrectly" f''' shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.''' ) SCREAMING_SNAKE_CASE : int = additional_special_tokens_extended else: SCREAMING_SNAKE_CASE : Tuple = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )] super().__init__( UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , mask_token_sent=UpperCAmelCase_ , offset=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : str = vocab_file SCREAMING_SNAKE_CASE : str = False if not self.vocab_file else True def _A ( self : Optional[Any] , UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : Optional[int] = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( "There should be 3 special tokens: mask_token, pad_token, and eos_token +" f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' ) return [1 if x in all_special_ids else 0 for x in seq] def _A ( self : int , UpperCAmelCase_ : List , UpperCAmelCase_ : Optional[List] = None , UpperCAmelCase_ : bool = False ): if already_has_special_tokens: return self._special_token_mask(UpperCAmelCase_ ) elif token_ids_a is None: return self._special_token_mask(UpperCAmelCase_ ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def _A ( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any=None ): if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _A ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(UpperCAmelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE : List[str] = os.path.join( UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ): copyfile(self.vocab_file , UpperCAmelCase_ ) return (out_vocab_file,)
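# --- usage sketch (separate snippet) ---
# A minimal round-trip with the fast Pegasus tokenizer this file implements
# (importing the released class from transformers rather than the local name);
# the checkpoint id comes from the pretrained map above.
from transformers import PegasusTokenizerFast

tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
ids = tokenizer("PEGASUS was pre-trained with gap-sentence generation.").input_ids
print(ids[-1] == tokenizer.eos_token_id)  # build_inputs_with_special_tokens appends </s>
print(tokenizer.decode(ids, skip_special_tokens=True))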
# Lint as: python3
"""Utilities for dataset file names."""

import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
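# --- usage sketch (separate snippet) ---
# Expected behaviour of the helpers above; the commented outputs were worked
# out by hand from the regex logic and shard naming scheme.
print(camelcase_to_snakecase("CamelCaseName"))  # -> "camel_case_name"
print(snakecase_to_camelcase("camel_case_name"))  # -> "CamelCaseName"
print(filenames_for_dataset_split("/data", "my_dataset", "train", filetype_suffix="arrow", shard_lengths=[100, 100]))
# -> ['/data/my_dataset-train-00000-of-00002.arrow', '/data/my_dataset-train-00001-of-00002.arrow']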
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase ): """simple docstring""" if isinstance(_UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE : Tuple = np.full((len(_UpperCAmelCase ), sequence_length, 2) , _UpperCAmelCase ) else: SCREAMING_SNAKE_CASE : int = np.full((len(_UpperCAmelCase ), sequence_length) , _UpperCAmelCase ) for i, tensor in enumerate(_UpperCAmelCase ): if padding_side == "right": if isinstance(_UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE : Dict = tensor[:sequence_length] else: SCREAMING_SNAKE_CASE : List[Any] = tensor[:sequence_length] else: if isinstance(_UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE : str = tensor[:sequence_length] else: SCREAMING_SNAKE_CASE : int = tensor[:sequence_length] return out_tensor.tolist() def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = ord(_UpperCAmelCase ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True SCREAMING_SNAKE_CASE : Dict = unicodedata.category(_UpperCAmelCase ) if cat.startswith("P" ): return True return False @dataclass class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' UpperCamelCase_ : str = 4_2 UpperCamelCase_ : List[str] = True UpperCamelCase_ : Union[str, Any] = None UpperCamelCase_ : Tuple = None UpperCamelCase_ : str = -1_0_0 UpperCamelCase_ : Union[str, Any] = '''pt''' def _A ( self : Any , UpperCAmelCase_ : Dict ): import torch SCREAMING_SNAKE_CASE : Optional[int] = 'label' if 'label' in features[0].keys() else 'labels' SCREAMING_SNAKE_CASE : int = [feature[label_name] for feature in features] if label_name in features[0].keys() else None SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer.pad( __a , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , ) if labels is None: return batch SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(batch["entity_ids"] ).shape[1] SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer.padding_side if padding_side == "right": SCREAMING_SNAKE_CASE : Tuple = [ list(__a ) + [self.label_pad_token_id] * (sequence_length - len(__a )) for label in labels ] else: SCREAMING_SNAKE_CASE : str = [ [self.label_pad_token_id] * (sequence_length - len(__a )) + list(__a ) for label in labels ] SCREAMING_SNAKE_CASE : Optional[Any] = [feature['ner_tags'] for feature in features] SCREAMING_SNAKE_CASE : List[Any] = padding_tensor(__a , -1 , __a , __a ) SCREAMING_SNAKE_CASE : List[Any] = [feature['original_entity_spans'] for feature in features] SCREAMING_SNAKE_CASE : List[str] = padding_tensor(__a , (-1, -1) , __a , __a ) SCREAMING_SNAKE_CASE : Optional[Any] = {k: torch.tensor(__a , dtype=torch.intaa ) for k, v in batch.items()} return batch
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## snake_case = 16 snake_case = 32 def lowerCamelCase__ ( lowercase , lowercase = 16 ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained("bert-base-cased" ) SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("glue" , "mrpc" ) def tokenize_function(lowercase ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowercase , max_length=lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): SCREAMING_SNAKE_CASE : List[Any] = datasets.map( lowercase , batched=lowercase , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library SCREAMING_SNAKE_CASE : Tuple = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. SCREAMING_SNAKE_CASE : Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": SCREAMING_SNAKE_CASE : str = 16 elif accelerator.mixed_precision != "no": SCREAMING_SNAKE_CASE : Optional[Any] = 8 else: SCREAMING_SNAKE_CASE : Union[str, Any] = None return tokenizer.pad( lowercase , padding="longest" , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors="pt" , ) # Instantiate dataloaders. 
SCREAMING_SNAKE_CASE : Optional[int] = DataLoader( tokenized_datasets["train"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase ) SCREAMING_SNAKE_CASE : Dict = DataLoader( tokenized_datasets["validation"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders snake_case = mocked_dataloaders # noqa: F811 def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowercase ) == "1": SCREAMING_SNAKE_CASE : int = 2 # New Code # SCREAMING_SNAKE_CASE : Union[str, Any] = int(args.gradient_accumulation_steps ) # Initialize accelerator SCREAMING_SNAKE_CASE : Tuple = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs SCREAMING_SNAKE_CASE : Any = config["lr"] SCREAMING_SNAKE_CASE : Optional[Any] = int(config["num_epochs"] ) SCREAMING_SNAKE_CASE : List[Any] = int(config["seed"] ) SCREAMING_SNAKE_CASE : Union[str, Any] = int(config["batch_size"] ) SCREAMING_SNAKE_CASE : Optional[Any] = evaluate.load("glue" , "mrpc" ) set_seed(lowercase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = get_dataloaders(lowercase , lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) SCREAMING_SNAKE_CASE : List[Any] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowercase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). SCREAMING_SNAKE_CASE : Any = model.to(accelerator.device ) # Instantiate optimizer SCREAMING_SNAKE_CASE : Any = AdamW(params=model.parameters() , lr=lowercase ) # Instantiate scheduler SCREAMING_SNAKE_CASE : Union[str, Any] = get_linear_schedule_with_warmup( optimizer=lowercase , num_warmup_steps=100 , num_training_steps=(len(lowercase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = accelerator.prepare( lowercase , lowercase , lowercase , lowercase , lowercase ) # Now we train the model for epoch in range(lowercase ): model.train() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(lowercase ): SCREAMING_SNAKE_CASE : Any = model(**lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = output.loss accelerator.backward(lowercase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE : List[Any] = model(**lowercase ) SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits.argmax(dim=-1 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=lowercase , references=lowercase , ) SCREAMING_SNAKE_CASE : Tuple = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , lowercase ) def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=lowercase , default=lowercase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) # New Code # parser.add_argument( "--gradient_accumulation_steps" , type=lowercase , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) SCREAMING_SNAKE_CASE : List[str] = parser.parse_args() SCREAMING_SNAKE_CASE : Dict = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(lowercase , lowercase ) if __name__ == "__main__": main()
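# --- standalone sketch (separate snippet) ---
# The core of the training loop above is the `accelerator.accumulate` context
# manager; a minimal self-contained restatement with a toy model (model,
# optimizer, and data below are illustrative, not from the script).
import torch
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=4)
model = torch.nn.Linear(8, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model, optimizer = accelerator.prepare(model, optimizer)

for step in range(8):
    batch = torch.randn(16, 8, device=accelerator.device)
    with accelerator.accumulate(model):
        # gradients are synchronized and applied only every 4th step
        loss = model(batch).pow(2).mean()
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()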
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps,
    embedding_dim,
    freq_shift=1,
    min_timescale=1,
    max_timescale=1.0e4,
    flip_sin_to_cos=False,
    scale=1.0,
):
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
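# --- usage sketch (separate snippet) ---
# Shape check for the sinusoidal embedding helper defined above: each timestep
# maps to embedding_dim values (a sin half concatenated with a cos half).
import jax.numpy as jnp

timesteps = jnp.array([0, 10, 100])
emb = get_sinusoidal_embeddings(timesteps, embedding_dim=8)
print(emb.shape)  # (3, 8)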
import functools


def minimum_tickets_cost(days, costs):
    """Return the minimum cost of travel passes covering every day in `days`.

    `costs` holds the prices of a 1-day, 7-day, and 30-day pass, in that order.
    """
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
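# --- usage sketch (separate snippet) ---
# The two classic LeetCode 983 examples; expected outputs worked out by hand:
# for the first, six 1-day passes would cost 12, but a 7-day pass covering
# days 4-8 plus 1-day passes on days 1 and 20 costs 7 + 2 + 2 = 11.
print(minimum_tickets_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11
print(minimum_tickets_cost([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 7, 15]))  # 17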
"""simple docstring""" import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. snake_case = abspath(join(dirname(__file__), """src""")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="""ignore""", category=FutureWarning) def lowerCamelCase__ ( lowercase ): """simple docstring""" config.addinivalue_line( "markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" ) config.addinivalue_line( "markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" ) config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" ) config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" ) config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" ) config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" ) def lowerCamelCase__ ( lowercase ): """simple docstring""" from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(__UpperCAmelCase ) def lowerCamelCase__ ( lowercase ): """simple docstring""" from transformers.testing_utils import pytest_terminal_summary_main SCREAMING_SNAKE_CASE : Any = terminalreporter.config.getoption("--make-reports" ) if make_reports: pytest_terminal_summary_main(__UpperCAmelCase , id=__UpperCAmelCase ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" if exitstatus == 5: SCREAMING_SNAKE_CASE : List[Any] = 0 # Doctest custom flag to ignore output. snake_case = doctest.register_optionflag("""IGNORE_RESULT""") snake_case = doctest.OutputChecker class SCREAMING_SNAKE_CASE ( _UpperCAmelCase ): '''simple docstring''' def _A ( self : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] ): if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) snake_case = CustomOutputChecker snake_case = HfDoctestModule snake_case = HfDocTestParser
def perfect_cube(n: int) -> bool:
    """Return True if `n` is a perfect cube, False otherwise.

    The float cube root is rounded to the nearest integer before checking,
    since `n ** (1 / 3)` is inexact and a direct equality test fails even for
    true cubes (e.g. 27 ** (1 / 3) == 3.0000000000000004).
    """
    val = round(abs(n) ** (1 / 3))
    return val * val * val == abs(n)


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
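# --- alternative sketch (separate snippet) ---
# An exact integer check that avoids floats entirely, via binary search on the
# cube root; the function name is our own, not from the file above.
def perfect_cube_binary_search(n: int) -> bool:
    n = abs(n)  # a negative integer is a cube iff its absolute value is
    low, high = 0, n
    while low <= high:
        mid = (low + high) // 2
        cube = mid * mid * mid
        if cube == n:
            return True
        if cube < n:
            low = mid + 1
        else:
            high = mid - 1
    return False


print(perfect_cube_binary_search(27))  # True
print(perfect_cube_binary_search(4))  # False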
import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters snake_case = (720, 1_280) # Height, Width snake_case = (0.4, 0.6) # if height or width lower than this scale, drop it. snake_case = 1 / 100 snake_case = """""" snake_case = """""" snake_case = """""" snake_case = 250 def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = get_dataset(__lowerCAmelCase , __lowerCAmelCase ) for index in range(__lowerCAmelCase ): SCREAMING_SNAKE_CASE : Optional[int] = random.sample(range(len(__lowerCAmelCase ) ) , 4 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = update_image_and_anno( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , filter_scale=__lowerCAmelCase , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' SCREAMING_SNAKE_CASE : Any = random_chars(32 ) SCREAMING_SNAKE_CASE : List[str] = path.split(os.sep )[-1].rsplit("." , 1 )[0] SCREAMING_SNAKE_CASE : Optional[int] = F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}''' cva.imwrite(F'''{file_root}.jpg''' , __lowerCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' ) SCREAMING_SNAKE_CASE : List[str] = [] for anno in new_annos: SCREAMING_SNAKE_CASE : Union[str, Any] = anno[3] - anno[1] SCREAMING_SNAKE_CASE : Tuple = anno[4] - anno[2] SCREAMING_SNAKE_CASE : str = anno[1] + width / 2 SCREAMING_SNAKE_CASE : Any = anno[2] + height / 2 SCREAMING_SNAKE_CASE : Optional[int] = F'''{anno[0]} {x_center} {y_center} {width} {height}''' annos_list.append(__lowerCAmelCase ) with open(F'''{file_root}.txt''' , "w" ) as outfile: outfile.write("\n".join(line for line in annos_list ) ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : List[Any] = [] for label_file in glob.glob(os.path.join(__lowerCAmelCase , "*.txt" ) ): SCREAMING_SNAKE_CASE : List[Any] = label_file.split(os.sep )[-1].rsplit("." 
, 1 )[0] with open(__lowerCAmelCase ) as in_file: SCREAMING_SNAKE_CASE : Optional[Any] = in_file.readlines() SCREAMING_SNAKE_CASE : List[str] = os.path.join(__lowerCAmelCase , F'''{label_name}.jpg''' ) SCREAMING_SNAKE_CASE : Optional[int] = [] for obj_list in obj_lists: SCREAMING_SNAKE_CASE : Tuple = obj_list.rstrip("\n" ).split(" " ) SCREAMING_SNAKE_CASE : Any = float(obj[1] ) - float(obj[3] ) / 2 SCREAMING_SNAKE_CASE : Optional[int] = float(obj[2] ) - float(obj[4] ) / 2 SCREAMING_SNAKE_CASE : Dict = float(obj[1] ) + float(obj[3] ) / 2 SCREAMING_SNAKE_CASE : List[str] = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(__lowerCAmelCase ) labels.append(__lowerCAmelCase ) return img_paths, labels def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = 0.0 , ): """simple docstring""" SCREAMING_SNAKE_CASE : str = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) SCREAMING_SNAKE_CASE : int = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) SCREAMING_SNAKE_CASE : Dict = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) SCREAMING_SNAKE_CASE : Optional[Any] = int(scale_x * output_size[1] ) SCREAMING_SNAKE_CASE : List[Any] = int(scale_y * output_size[0] ) SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : Optional[Any] = [] for i, index in enumerate(__lowerCAmelCase ): SCREAMING_SNAKE_CASE : List[Any] = all_img_list[index] path_list.append(__lowerCAmelCase ) SCREAMING_SNAKE_CASE : List[Any] = all_annos[index] SCREAMING_SNAKE_CASE : Dict = cva.imread(__lowerCAmelCase ) if i == 0: # top-left SCREAMING_SNAKE_CASE : int = cva.resize(__lowerCAmelCase , (divid_point_x, divid_point_y) ) SCREAMING_SNAKE_CASE : int = img for bbox in img_annos: SCREAMING_SNAKE_CASE : Dict = bbox[1] * scale_x SCREAMING_SNAKE_CASE : str = bbox[2] * scale_y SCREAMING_SNAKE_CASE : Tuple = bbox[3] * scale_x SCREAMING_SNAKE_CASE : int = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right SCREAMING_SNAKE_CASE : Tuple = cva.resize(__lowerCAmelCase , (output_size[1] - divid_point_x, divid_point_y) ) SCREAMING_SNAKE_CASE : Dict = img for bbox in img_annos: SCREAMING_SNAKE_CASE : List[str] = scale_x + bbox[1] * (1 - scale_x) SCREAMING_SNAKE_CASE : Union[str, Any] = bbox[2] * scale_y SCREAMING_SNAKE_CASE : str = scale_x + bbox[3] * (1 - scale_x) SCREAMING_SNAKE_CASE : Dict = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left SCREAMING_SNAKE_CASE : Optional[Any] = cva.resize(__lowerCAmelCase , (divid_point_x, output_size[0] - divid_point_y) ) SCREAMING_SNAKE_CASE : Tuple = img for bbox in img_annos: SCREAMING_SNAKE_CASE : Dict = bbox[1] * scale_x SCREAMING_SNAKE_CASE : str = scale_y + bbox[2] * (1 - scale_y) SCREAMING_SNAKE_CASE : str = bbox[3] * scale_x SCREAMING_SNAKE_CASE : Tuple = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right SCREAMING_SNAKE_CASE : str = cva.resize( __lowerCAmelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) SCREAMING_SNAKE_CASE : str = img for bbox in img_annos: SCREAMING_SNAKE_CASE : Tuple = scale_x + bbox[1] * (1 - scale_x) SCREAMING_SNAKE_CASE : Optional[int] = scale_y + bbox[2] * (1 - scale_y) SCREAMING_SNAKE_CASE : List[str] = scale_x + bbox[3] * (1 - scale_x) SCREAMING_SNAKE_CASE : int = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, 
ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: SCREAMING_SNAKE_CASE : Optional[int] = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def lowerCamelCase__ ( lowercase ): """simple docstring""" assert number_char > 1, "The number of character should greater than 1" SCREAMING_SNAKE_CASE : Union[str, Any] = ascii_lowercase + digits return "".join(random.choice(__lowerCAmelCase ) for _ in range(__lowerCAmelCase ) ) if __name__ == "__main__": main() print("""DONE ✅""")
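# --- standalone sketch (separate snippet) ---
# main() above writes labels in YOLO format: corner boxes are converted to
# normalized (class, x_center, y_center, width, height). The helper below is
# our own minimal restatement of that conversion, not part of the script.
def corners_to_yolo(label, xmin, ymin, xmax, ymax):
    width = xmax - xmin
    height = ymax - ymin
    x_center = xmin + width / 2
    y_center = ymin + height / 2
    return f"{label} {x_center} {y_center} {width} {height}"


print(corners_to_yolo(0, 0.25, 0.25, 0.75, 0.5))  # "0 0.5 0.375 0.5 0.25"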
import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() snake_case = logging.get_logger(__name__) snake_case = [ ("""bert.bert""", """visual_bert"""), ("""bert.cls""", """cls"""), ("""bert.classifier""", """cls"""), ("""token_type_embeddings_visual""", """visual_token_type_embeddings"""), ("""position_embeddings_visual""", """visual_position_embeddings"""), ("""projection""", """visual_projection"""), ] snake_case = [ """nlvr2_coco_pre_trained.th""", """nlvr2_fine_tuned.th""", """nlvr2_pre_trained.th""", """vcr_coco_pre_train.th""", """vcr_fine_tune.th""", """vcr_pre_train.th""", """vqa_coco_pre_trained.th""", """vqa_fine_tuned.th""", """vqa_pre_trained.th""", ] def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : int = torch.load(lowercase , map_location="cpu" ) return sd def lowerCamelCase__ ( lowercase , lowercase , lowercase=rename_keys_prefix ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = OrderedDict() SCREAMING_SNAKE_CASE : Union[str, Any] = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue SCREAMING_SNAKE_CASE : Optional[Any] = key for name_pair in rename_keys_prefix: SCREAMING_SNAKE_CASE : Tuple = new_key.replace(name_pair[0] , name_pair[1] ) SCREAMING_SNAKE_CASE : Union[str, Any] = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately SCREAMING_SNAKE_CASE : Union[str, Any] = new_d["cls.predictions.bias"] return new_d @torch.no_grad() def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" assert ( checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.''' # Get Config if "pre" in checkpoint_path: SCREAMING_SNAKE_CASE : str = "pretraining" if "vcr" in checkpoint_path: SCREAMING_SNAKE_CASE : str = {"visual_embedding_dim": 512} elif "vqa_advanced" in checkpoint_path: SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 2048} elif "vqa" in checkpoint_path: SCREAMING_SNAKE_CASE : Optional[int] = {"visual_embedding_dim": 2048} elif "nlvr" in checkpoint_path: SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 1024} else: raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' ) else: if "vcr" in checkpoint_path: SCREAMING_SNAKE_CASE : Optional[Any] = {"visual_embedding_dim": 512} SCREAMING_SNAKE_CASE : Union[str, Any] = "multichoice" elif "vqa_advanced" in checkpoint_path: SCREAMING_SNAKE_CASE : int = {"visual_embedding_dim": 2048} SCREAMING_SNAKE_CASE : Any = "vqa_advanced" elif "vqa" in checkpoint_path: SCREAMING_SNAKE_CASE : Any = {"visual_embedding_dim": 2048, "num_labels": 3129} SCREAMING_SNAKE_CASE : Tuple = "vqa" elif "nlvr" in checkpoint_path: SCREAMING_SNAKE_CASE : int = { "visual_embedding_dim": 1024, "num_labels": 2, } SCREAMING_SNAKE_CASE : Union[str, Any] = "nlvr" SCREAMING_SNAKE_CASE : List[Any] = VisualBertConfig(**lowercase ) # Load State Dict SCREAMING_SNAKE_CASE : Union[str, Any] = load_state_dict(lowercase ) SCREAMING_SNAKE_CASE : Union[str, Any] = get_new_dict(lowercase , lowercase ) if model_type == "pretraining": 
SCREAMING_SNAKE_CASE : Union[str, Any] = VisualBertForPreTraining(lowercase ) elif model_type == "vqa": SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForQuestionAnswering(lowercase ) elif model_type == "nlvr": SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForVisualReasoning(lowercase ) elif model_type == "multichoice": SCREAMING_SNAKE_CASE : List[Any] = VisualBertForMultipleChoice(lowercase ) model.load_state_dict(lowercase ) # Save Checkpoints Path(lowercase ).mkdir(exist_ok=lowercase ) model.save_pretrained(lowercase ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""") parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""") snake_case = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin snake_case = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE ( a__ , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Any = AlbertTokenizer UpperCamelCase_ : List[Any] = AlbertTokenizerFast UpperCamelCase_ : Tuple = True UpperCamelCase_ : Tuple = True UpperCamelCase_ : Dict = True def _A ( self : int ): super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE : str = AlbertTokenizer(_lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def _A ( self : int , UpperCAmelCase_ : Any ): SCREAMING_SNAKE_CASE : Any = '''this is a test''' SCREAMING_SNAKE_CASE : Dict = '''this is a test''' return input_text, output_text def _A ( self : List[str] ): SCREAMING_SNAKE_CASE : Union[str, Any] = '''<pad>''' SCREAMING_SNAKE_CASE : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase ) def _A ( self : int ): SCREAMING_SNAKE_CASE : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "▁eloquent" ) self.assertEqual(len(_lowerCamelCase ) , 3_0000 ) def _A ( self : Any ): self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 ) def _A ( self : str ): if not self.test_rust_tokenizer: return SCREAMING_SNAKE_CASE : Any = self.get_tokenizer() SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE : List[str] = '''I was born in 92000, and this is falsé.''' SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = rust_tokenizer.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : str = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.encode(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) def _A ( self : Tuple ): SCREAMING_SNAKE_CASE : Union[str, Any] = AlbertTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize("This is a test" ) self.assertListEqual(_lowerCamelCase , ["▁this", "▁is", "▁a", "▁test"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [48, 25, 21, 1289] ) SCREAMING_SNAKE_CASE : Dict = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( _lowerCamelCase , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.convert_tokens_to_ids(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.convert_ids_to_tokens(_lowerCamelCase ) self.assertListEqual( _lowerCamelCase , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."] , ) def _A ( self : Any ): SCREAMING_SNAKE_CASE : Dict = AlbertTokenizer(_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode("sequence builders" ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode("multi-sequence build" ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def _A ( self : List[Any] ): SCREAMING_SNAKE_CASE : Tuple = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCamelCase , model_name="albert-base-v2" , revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" , )
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    r"""Constructs a CLAP processor which wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor."""

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
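# --- usage sketch (separate snippet) ---
# Joint text/audio preprocessing with the processor above; the checkpoint id
# and the zero-filled waveform are illustrative assumptions only.
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
audio = np.zeros(48_000, dtype=np.float32)  # 1 s of silence at 48 kHz
inputs = processor(text=["a dog barking"], audios=[audio], sampling_rate=48_000, return_tensors="pt")
print(sorted(inputs.keys()))  # tokenizer outputs plus 'input_features' from the feature extractor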
"""simple docstring""" snake_case = [ """VerificationMode""", """Version""", """disable_progress_bar""", """enable_progress_bar""", """is_progress_bar_enabled""", """experimental""", ] from .info_utils import VerificationMode from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled from .version import Version from .experimental import experimental
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" assert isinstance(lowercase , lowercase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = tmp_path / "cache" SCREAMING_SNAKE_CASE : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = tmp_path / "cache" SCREAMING_SNAKE_CASE : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : Any = features.copy() if features else default_expected_features SCREAMING_SNAKE_CASE : Optional[int] = ( Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache" SCREAMING_SNAKE_CASE : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" if issubclass(lowercase , lowercase ): SCREAMING_SNAKE_CASE : Optional[Any] = parquet_path elif issubclass(lowercase , lowercase ): SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path] SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache" SCREAMING_SNAKE_CASE : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) def lowerCamelCase__ ( lowercase , lowercase , lowercase=("train",) ): """simple docstring""" assert isinstance(lowercase , lowercase ) for split in splits: SCREAMING_SNAKE_CASE : Optional[int] = dataset_dict[split] 
assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : str = tmp_path / "cache" SCREAMING_SNAKE_CASE : Dict = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): SCREAMING_SNAKE_CASE : str = ParquetDatasetReader( {"train": parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache" SCREAMING_SNAKE_CASE : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : Dict = features.copy() if features else default_expected_features SCREAMING_SNAKE_CASE : str = ( Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader({"train": parquet_path} , features=lowercase , cache_dir=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" if split: SCREAMING_SNAKE_CASE : Any = {split: parquet_path} else: SCREAMING_SNAKE_CASE : Tuple = "train" SCREAMING_SNAKE_CASE : int = {"train": parquet_path, "test": parquet_path} SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache" SCREAMING_SNAKE_CASE : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : int = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" ) assert writer.write() > 0 SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / "foo.parquet" ) SCREAMING_SNAKE_CASE : List[Any] = pf.read() assert dataset.data.table == output_table def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : int = str(shared_datadir / "test_image_rgb.jpg" ) SCREAMING_SNAKE_CASE : Union[str, Any] = {"image": [image_path]} SCREAMING_SNAKE_CASE : Union[str, Any] = Features({"image": Image()} ) SCREAMING_SNAKE_CASE : int = Dataset.from_dict(lowercase , features=lowercase ) SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" ) assert writer.write() > 0 SCREAMING_SNAKE_CASE : str = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) ) assert dataset.features == reloaded_dataset.features SCREAMING_SNAKE_CASE : Any = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=lowercase ).read() assert 
dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( "feature, expected" , [ (Features({"foo": Value("int32" )} ), None), (Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" assert get_writer_batch_size(lowercase ) == expected
def abbr(a: str, b: str) -> bool:
    """
    Determine whether string `a` can be turned into string `b` by capitalizing
    zero or more of its lowercase letters and deleting all remaining lowercase
    letters.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
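# How to read the DP above: dp[i][j] is True when the first i characters of `a`
# can produce the first j characters of `b`. From a reachable state, a lowercase
# a[i] may either be capitalized to consume b[j] (dp[i + 1][j + 1]) or deleted
# (dp[i + 1][j]); an uppercase a[i] must match b[j] exactly or the path dies.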
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
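# Note on the pattern above: `_import_structure` maps submodule names to their
# public symbols, and at import time the real module object is replaced by a
# `_LazyModule`, so the torch-backed modeling code is only imported when one of
# those symbols is first accessed (or when a type checker follows TYPE_CHECKING).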
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
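# Example invocations (the dataset path is hypothetical; the subcommands are the
# ones registered above):
#
#     datasets-cli env
#     datasets-cli test ./path/to/my_dataset --save_infos --all_configs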
def bubble_sort(list_data: list, length: int = 0) -> list:
    """
    Recursive bubble sort: after each pass the largest remaining element is in
    its final position, so the problem size shrinks by one on each recursion.
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
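# Quick check (assuming the bubble_sort defined above):
#
#     assert bubble_sort([0, 5, 2, 3, 2]) == [0, 2, 2, 3, 5]
#     assert bubble_sort([-2, -45, -5]) == [-45, -5, -2]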
def hexagonal_numbers(length: int) -> list[int]:
    """
    Return the first `length` hexagonal numbers, h(n) = n * (2n - 1).
    """
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
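# Sanity check (assuming hexagonal_numbers above): h(n) = n * (2n - 1) gives
# 0, 1, 6, 15, 28, 45 for n = 0..5; consecutive differences are 4n - 3
# (1, 5, 9, 13, ...), the usual layer sizes of the hexagonal lattice.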
import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger snake_case = get_logger(__name__) snake_case = r""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs (`Dict[str, Any]`, *optional*): Additional logits processor specific kwargs. Return: `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. """ class SCREAMING_SNAKE_CASE : '''simple docstring''' @add_start_docstrings(UpperCAmelCase_ ) def __call__( self : str , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray ): raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class SCREAMING_SNAKE_CASE : '''simple docstring''' @add_start_docstrings(UpperCAmelCase_ ) def __call__( self : Optional[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray ): raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' @add_start_docstrings(UpperCAmelCase_ ) def __call__( self : Optional[int] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ): for processor in self: SCREAMING_SNAKE_CASE : Optional[int] = inspect.signature(processor.__call__ ).parameters if len(UpperCAmelCase_ ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( f'''Make sure that all the required parameters: {list(function_args.keys() )} for ''' f'''{processor.__class__} are passed to the logits processor.''' ) SCREAMING_SNAKE_CASE : int = processor(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ) else: SCREAMING_SNAKE_CASE : Dict = processor(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : int , UpperCAmelCase_ : float ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not (temperature > 0): raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' ) SCREAMING_SNAKE_CASE : Optional[int] = temperature def __call__( self : List[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Dict = scores / self.temperature return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : str , UpperCAmelCase_ : float , UpperCAmelCase_ : float = -float("Inf" ) , UpperCAmelCase_ : int = 1 ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or (top_p < 0 or top_p > 1.0): raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' ) if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or (min_tokens_to_keep < 1): raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' ) 
SCREAMING_SNAKE_CASE : Optional[int] = top_p SCREAMING_SNAKE_CASE : str = filter_value SCREAMING_SNAKE_CASE : List[str] = min_tokens_to_keep def __call__( self : Dict , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = lax.top_k(UpperCAmelCase_ , scores.shape[-1] ) SCREAMING_SNAKE_CASE : str = jnp.full_like(UpperCAmelCase_ , self.filter_value ) SCREAMING_SNAKE_CASE : Optional[int] = jax.nn.softmax(UpperCAmelCase_ , axis=-1 ).cumsum(axis=-1 ) SCREAMING_SNAKE_CASE : Tuple = cumulative_probs < self.top_p # include the token that is higher than top_p as well SCREAMING_SNAKE_CASE : Optional[int] = jnp.roll(UpperCAmelCase_ , 1 ) score_mask |= score_mask.at[:, 0].set(UpperCAmelCase_ ) # min tokens to keep SCREAMING_SNAKE_CASE : Union[str, Any] = score_mask.at[:, : self.min_tokens_to_keep].set(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = jnp.where(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = jax.lax.sort_key_val(UpperCAmelCase_ , UpperCAmelCase_ )[-1] return next_scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : float = -float("Inf" ) , UpperCAmelCase_ : int = 1 ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or top_k <= 0: raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' ) SCREAMING_SNAKE_CASE : List[str] = max(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = filter_value def __call__( self : Dict , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = scores.shape SCREAMING_SNAKE_CASE : List[str] = jnp.full(batch_size * vocab_size , self.filter_value ) SCREAMING_SNAKE_CASE : List[str] = min(self.top_k , scores.shape[-1] ) # Safety check SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = lax.top_k(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = jnp.broadcast_to((jnp.arange(UpperCAmelCase_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() SCREAMING_SNAKE_CASE : List[str] = topk_scores.flatten() SCREAMING_SNAKE_CASE : List[Any] = topk_indices.flatten() + shift SCREAMING_SNAKE_CASE : Dict = next_scores_flat.at[topk_indices_flat].set(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = next_scores_flat.reshape(UpperCAmelCase_ , UpperCAmelCase_ ) return next_scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : List[str] = bos_token_id def __call__( self : Tuple , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Dict = jnp.full(scores.shape , -float("inf" ) ) SCREAMING_SNAKE_CASE : Optional[int] = 1 - jnp.bool_(cur_len - 1 ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(UpperCAmelCase_ , new_scores.at[:, self.bos_token_id].set(0 ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Optional[Any] = max_length SCREAMING_SNAKE_CASE : Tuple = eos_token_id def __call__( self : List[str] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : List[str] = jnp.full(scores.shape , -float("inf" ) ) SCREAMING_SNAKE_CASE : str = 
1 - jnp.bool_(cur_len - self.max_length + 1 ) SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where(UpperCAmelCase_ , new_scores.at[:, self.eos_token_id].set(0 ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or min_length < 0: raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' ) if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or eos_token_id < 0: raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' ) SCREAMING_SNAKE_CASE : List[str] = min_length SCREAMING_SNAKE_CASE : Tuple = eos_token_id def __call__( self : Optional[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): # create boolean flag to decide if min length penalty should be applied SCREAMING_SNAKE_CASE : Optional[int] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) SCREAMING_SNAKE_CASE : Optional[int] = jnp.where(UpperCAmelCase_ , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Optional[Any] = list(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = begin_index def __call__( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Union[str, Any] = 1 - jnp.bool_(cur_len - self.begin_index ) SCREAMING_SNAKE_CASE : List[str] = jnp.where(UpperCAmelCase_ , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , UpperCAmelCase_ ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase_ : list ): SCREAMING_SNAKE_CASE : List[Any] = list(UpperCAmelCase_ ) def __call__( self : Any , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Tuple = scores.at[..., self.suppress_tokens].set(-float("inf" ) ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any ): SCREAMING_SNAKE_CASE : List[Any] = dict(UpperCAmelCase_ ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. 
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: SCREAMING_SNAKE_CASE : Any = force_token_array.at[index].set(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = jnp.intaa(UpperCAmelCase_ ) def __call__( self : Tuple , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ): def _force_token(UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : List[str] = scores.shape[0] SCREAMING_SNAKE_CASE : Optional[int] = self.force_token_array[generation_idx] SCREAMING_SNAKE_CASE : Tuple = jnp.ones_like(UpperCAmelCase_ , dtype=scores.dtype ) * -float("inf" ) SCREAMING_SNAKE_CASE : Dict = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) SCREAMING_SNAKE_CASE : Optional[Any] = lax.dynamic_update_slice(UpperCAmelCase_ , UpperCAmelCase_ , (0, current_token) ) return new_scores SCREAMING_SNAKE_CASE : Any = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(UpperCAmelCase_ ) , lambda: scores , ) , ) return scores class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : Union[str, Any] = generate_config.eos_token_id SCREAMING_SNAKE_CASE : Tuple = generate_config.no_timestamps_token_id SCREAMING_SNAKE_CASE : List[Any] = generate_config.no_timestamps_token_id + 1 SCREAMING_SNAKE_CASE : Dict = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(UpperCAmelCase_ , "max_initial_timestamp_index" ): SCREAMING_SNAKE_CASE : List[Any] = generate_config.max_initial_timestamp_index else: SCREAMING_SNAKE_CASE : List[str] = model_config.vocab_size if self.max_initial_timestamp_index is None: SCREAMING_SNAKE_CASE : List[str] = model_config.vocab_size def __call__( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ): # suppress <|notimestamps|> which is handled by without_timestamps SCREAMING_SNAKE_CASE : int = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) ) def handle_pairs(UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] ): SCREAMING_SNAKE_CASE : Tuple = jnp.where((cur_len - self.begin_index) >= 1 , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Tuple = jnp.where((cur_len - self.begin_index) < 2 , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , UpperCAmelCase_ , UpperCAmelCase_ , ) return jnp.where( UpperCAmelCase_ , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Optional[Any] = jax.vmap(UpperCAmelCase_ )(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(cur_len == self.begin_index , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : List[str] = self.timestamp_begin + self.max_initial_timestamp_index 
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where( UpperCAmelCase_ , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , UpperCAmelCase_ , ) # if sum of probability over timestamps is above any other token, sample timestamp SCREAMING_SNAKE_CASE : List[Any] = jax.nn.log_softmax(UpperCAmelCase_ , axis=-1 ) def handle_cumulative_probs(UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ): SCREAMING_SNAKE_CASE : Union[str, Any] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : List[str] = jax.vmap(UpperCAmelCase_ )(UpperCAmelCase_ , UpperCAmelCase_ ) return scores
from __future__ import annotations

import os
from collections.abc import Mapping

EdgeT = tuple[int, int]


class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        # Normalise every edge key to (smaller vertex, larger vertex).
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                # An edge crosses the cut when exactly one endpoint is in the subgraph.
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"{solution() = }")
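# Worked mini example (illustrative, not part of the Euler data set): for the
# triangle graph {0-1: 1, 1-2: 2, 0-2: 3}, Prim's algorithm keeps edges 0-1 and
# 1-2 (total weight 3), so the saving over the full network is 6 - 3 = 3.
#
#     g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
#     assert sum(g.edges.values()) - sum(g.prims_algorithm().edges.values()) == 3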
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import platform
import sys


os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow log verbosity

print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    print("Torch version:", None)

try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)
def binomial_coefficient(n: int, r: int) -> int:
    """
    Compute C(n, r) with a single rolling row of Pascal's triangle,
    using O(r) space instead of the full O(n * r) table.
    """
    c = [0 for _ in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # Compute the current row from the previous row, right to left so each
        # c[j - 1] still holds the previous row's value when it is read.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
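# Quick check (assuming binomial_coefficient above): C(10, 5) = 252. The
# right-to-left update is what lets one array stand in for the whole table,
# since it applies Pascal's rule C(n, r) = C(n - 1, r - 1) + C(n - 1, r)
# without overwriting the previous row's entries before they are used.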
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    """
    Image processor that rescales images and pads them so that each spatial
    dimension is a multiple of `pad_size`.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format=None) -> np.ndarray:
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
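# Usage sketch (illustrative; `pil_image` is an assumption):
#
#     processor = Swin2SRImageProcessor(pad_size=8)
#     inputs = processor(images=pil_image, return_tensors="pt")
#     # pixel_values are rescaled to [0, 1] and symmetric-padded so that both
#     # spatial dimensions end up as multiples of 8.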
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() snake_case = logging.get_logger(__name__) snake_case = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } snake_case = { """b0""": { """hidden_dim""": 1_280, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 224, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1_280, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 240, """dropout_rate""": 0.2, """dw_padding""": [16], }, """b2""": { """hidden_dim""": 1_408, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 260, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 16], }, """b3""": { """hidden_dim""": 1_536, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 300, """dropout_rate""": 0.3, """dw_padding""": [5, 18], }, """b4""": { """hidden_dim""": 1_792, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 380, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2_048, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 456, """dropout_rate""": 0.4, """dw_padding""": [13, 27], }, """b6""": { """hidden_dim""": 2_304, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 528, """dropout_rate""": 0.5, """dw_padding""": [31], }, """b7""": { """hidden_dim""": 2_560, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 600, """dropout_rate""": 0.5, """dw_padding""": [18], }, } def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : str = EfficientNetConfig() SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["hidden_dim"] SCREAMING_SNAKE_CASE : Tuple = CONFIG_MAP[model_name]["width_coef"] SCREAMING_SNAKE_CASE : Optional[int] = CONFIG_MAP[model_name]["depth_coef"] SCREAMING_SNAKE_CASE : Union[str, Any] = CONFIG_MAP[model_name]["image_size"] SCREAMING_SNAKE_CASE : Any = CONFIG_MAP[model_name]["dropout_rate"] SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["dw_padding"] SCREAMING_SNAKE_CASE : str = "huggingface/label-files" SCREAMING_SNAKE_CASE : str = "imagenet-1k-id2label.json" SCREAMING_SNAKE_CASE : str = 1000 SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) ) SCREAMING_SNAKE_CASE : Tuple = {int(lowercase ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Union[str, Any] = idalabel SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in idalabel.items()} return config def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" SCREAMING_SNAKE_CASE : List[Any] = Image.open(requests.get(lowercase , stream=lowercase ).raw ) return im def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"] SCREAMING_SNAKE_CASE : int = 
EfficientNetImageProcessor( size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowercase , ) return preprocessor def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )] SCREAMING_SNAKE_CASE : List[str] = sorted(set(lowercase ) ) SCREAMING_SNAKE_CASE : List[str] = len(lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = {b: str(lowercase ) for b, i in zip(lowercase , range(lowercase ) )} SCREAMING_SNAKE_CASE : Dict = [] rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") ) rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") ) rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") ) rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") ) rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") ) for b in block_names: SCREAMING_SNAKE_CASE : Tuple = block_name_mapping[b] rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") ) rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") ) 
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") ) rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") ) rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") ) SCREAMING_SNAKE_CASE : int = {} for item in rename_keys: if item[0] in original_param_names: SCREAMING_SNAKE_CASE : Any = "efficientnet." + item[1] SCREAMING_SNAKE_CASE : Optional[Any] = "classifier.weight" SCREAMING_SNAKE_CASE : List[str] = "classifier.bias" return key_mapping def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" for key, value in tf_params.items(): if "normalization" in key: continue SCREAMING_SNAKE_CASE : str = key_mapping[key] if "_conv" in key and "kernel" in key: SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowercase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(np.transpose(lowercase ) ) else: SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(lowercase ) @torch.no_grad() def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = model_classes[model_name]( include_top=lowercase , weights="imagenet" , input_tensor=lowercase , input_shape=lowercase , pooling=lowercase , classes=1000 , classifier_activation="softmax" , ) SCREAMING_SNAKE_CASE : List[Any] = original_model.trainable_variables SCREAMING_SNAKE_CASE : Dict = original_model.non_trainable_variables SCREAMING_SNAKE_CASE : Dict = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: SCREAMING_SNAKE_CASE : Tuple = param.numpy() SCREAMING_SNAKE_CASE : Tuple = list(tf_params.keys() ) # Load HuggingFace model SCREAMING_SNAKE_CASE : Tuple = get_efficientnet_config(lowercase ) SCREAMING_SNAKE_CASE : str = EfficientNetForImageClassification(lowercase ).eval() SCREAMING_SNAKE_CASE : Dict = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("Converting parameters..." ) SCREAMING_SNAKE_CASE : Dict = rename_keys(lowercase ) replace_params(lowercase , lowercase , lowercase ) # Initialize preprocessor and preprocess input image SCREAMING_SNAKE_CASE : Optional[int] = convert_image_processor(lowercase ) SCREAMING_SNAKE_CASE : int = preprocessor(images=prepare_img() , return_tensors="pt" ) # HF model inference hf_model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = hf_model(**lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = outputs.logits.detach().numpy() # Original model inference SCREAMING_SNAKE_CASE : int = False SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"] SCREAMING_SNAKE_CASE : Any = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) SCREAMING_SNAKE_CASE : Tuple = image.img_to_array(lowercase ) SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(lowercase , axis=0 ) SCREAMING_SNAKE_CASE : Any = original_model.predict(lowercase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(lowercase , lowercase , atol=1E-3 ), "The predicted logits are not the same." print("Model outputs match!" 
) if save_model: # Create folder to save model if not os.path.isdir(lowercase ): os.mkdir(lowercase ) # Save converted model and image processor hf_model.save_pretrained(lowercase ) preprocessor.save_pretrained(lowercase ) if push_to_hub: # Push model and image processor to hub print(F'''Pushing converted {model_name} to the hub...''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = F'''efficientnet-{model_name}''' preprocessor.push_to_hub(lowercase ) hf_model.push_to_hub(lowercase ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") snake_case = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
def solution(n: int = 10) -> str:
    """
    Return the last `n` digits of the non-Mersenne prime 28433 * 2^7830457 + 1
    (Project Euler problem 97), computed with modular exponentiation.
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
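# Why pow(2, 7830457, modulus) instead of 2**7830457: the three-argument pow
# reduces mod `modulus` at every square-and-multiply step, so it needs only
# O(log exponent) multiplications on n-digit numbers rather than building a
# roughly 2.4-million-digit integer first. For example, pow(2, 10, 1000) == 24,
# which is 1024 % 1000.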
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # Validate that the rows and columns of the grid are sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
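# Quick check (assuming the functions above): on [[3, 2], [1, 0]] there are no
# negatives, and on [[7, 7, 6], [-1, -2, -3]] all three methods return 3. The
# binary-search version also shrinks its per-row search bound, exploiting that
# columns are sorted too, so later rows only scan up to the previous row's bound.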
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    """Configuration class for a backbone loaded through the `timm` library."""

    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
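# Usage sketch (illustrative; "resnet50" assumes a timm model of that name is
# available locally):
#
#     config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
#     # features_only=True requests the timm model as a feature extractor that
#     # returns the feature maps at `out_indices`.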
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler snake_case = 16 snake_case = 32 def lowerCamelCase__ ( lowercase ): """simple docstring""" return int(x / 2**20 ) class SCREAMING_SNAKE_CASE : '''simple docstring''' def __enter__( self : Dict ): gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero SCREAMING_SNAKE_CASE : Optional[Any] = torch.cuda.memory_allocated() return self def __exit__( self : Optional[int] , *UpperCAmelCase_ : Optional[int] ): gc.collect() torch.cuda.empty_cache() SCREAMING_SNAKE_CASE : Dict = torch.cuda.memory_allocated() SCREAMING_SNAKE_CASE : List[str] = torch.cuda.max_memory_allocated() SCREAMING_SNAKE_CASE : int = bamb(self.end - self.begin ) SCREAMING_SNAKE_CASE : Union[str, Any] = bamb(self.peak - self.begin ) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def lowerCamelCase__ ( lowercase , lowercase = 16 , lowercase = "bert-base-cased" , lowercase = 320 , lowercase = 160 , ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(a_ ) SCREAMING_SNAKE_CASE : Tuple = load_dataset( "glue" , "mrpc" , split={"train": F'''train[:{n_train}]''', "validation": F'''validation[:{n_val}]'''} ) def tokenize_function(lowercase ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE : int = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=a_ , max_length=a_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset SCREAMING_SNAKE_CASE : Any = datasets.map( a_ , batched=a_ , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=a_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library SCREAMING_SNAKE_CASE : Optional[int] = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(a_ , padding="max_length" , max_length=128 , return_tensors="pt" ) return tokenizer.pad(a_ , padding="longest" , return_tensors="pt" ) # Instantiate dataloaders. 
SCREAMING_SNAKE_CASE : List[Any] = DataLoader( tokenized_datasets["train"] , shuffle=a_ , collate_fn=a_ , batch_size=a_ ) SCREAMING_SNAKE_CASE : Tuple = DataLoader( tokenized_datasets["validation"] , shuffle=a_ , collate_fn=a_ , batch_size=a_ ) return train_dataloader, eval_dataloader def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs SCREAMING_SNAKE_CASE : int = config["lr"] SCREAMING_SNAKE_CASE : Union[str, Any] = int(config["num_epochs"] ) SCREAMING_SNAKE_CASE : List[Any] = int(config["seed"] ) SCREAMING_SNAKE_CASE : Tuple = int(config["batch_size"] ) SCREAMING_SNAKE_CASE : Any = args.model_name_or_path set_seed(a_ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = get_dataloaders(a_ , a_ , a_ , args.n_train , args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) SCREAMING_SNAKE_CASE : Tuple = AutoModelForSequenceClassification.from_pretrained(a_ , return_dict=a_ ) # Instantiate optimizer SCREAMING_SNAKE_CASE : Any = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) SCREAMING_SNAKE_CASE : Any = optimizer_cls(params=model.parameters() , lr=a_ ) if accelerator.state.deepspeed_plugin is not None: SCREAMING_SNAKE_CASE : Optional[int] = accelerator.state.deepspeed_plugin.deepspeed_config[ "gradient_accumulation_steps" ] else: SCREAMING_SNAKE_CASE : Union[str, Any] = 1 SCREAMING_SNAKE_CASE : List[str] = (len(a_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): SCREAMING_SNAKE_CASE : Any = get_linear_schedule_with_warmup( optimizer=a_ , num_warmup_steps=0 , num_training_steps=a_ , ) else: SCREAMING_SNAKE_CASE : str = DummyScheduler(a_ , total_num_steps=a_ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = accelerator.prepare( a_ , a_ , a_ , a_ , a_ ) # We need to keep track of how many total steps we have iterated over SCREAMING_SNAKE_CASE : Optional[int] = 0 # We also need to keep track of the stating epoch so files are named properly SCREAMING_SNAKE_CASE : List[Any] = 0 # Now we train the model SCREAMING_SNAKE_CASE : int = {} for epoch in range(a_ , a_ ): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(a_ ): SCREAMING_SNAKE_CASE : List[str] = model(**a_ ) SCREAMING_SNAKE_CASE : Tuple = outputs.loss SCREAMING_SNAKE_CASE : List[str] = loss / gradient_accumulation_steps accelerator.backward(a_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin ) ) ) accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used ) ) accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked ) ) accelerator.print( "Total Peak Memory consumed during the train (max): {}".format( tracemalloc.peaked + bamb(tracemalloc.begin ) ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = tracemalloc.peaked + bamb(tracemalloc.begin ) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[F'''epoch-{epoch}'''] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , "peak_memory_utilization.json" ) , "w" ) as f: json.dump(a_ , a_ ) def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." ) parser.add_argument( "--model_name_or_path" , type=a_ , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=a_ , ) parser.add_argument( "--output_dir" , type=a_ , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , ) parser.add_argument( "--peak_memory_upper_bound" , type=a_ , default=a_ , help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value." , ) parser.add_argument( "--n_train" , type=a_ , default=320 , help="Number of training examples to use." , ) parser.add_argument( "--n_val" , type=a_ , default=160 , help="Number of validation examples to use." , ) parser.add_argument( "--num_epochs" , type=a_ , default=1 , help="Number of train epochs." , ) SCREAMING_SNAKE_CASE : Tuple = parser.parse_args() SCREAMING_SNAKE_CASE : int = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} training_function(a_ , a_ ) if __name__ == "__main__": main()
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available snake_case = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = ["""MLukeTokenizer"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def lowerCamelCase__ ( lowercase ): """simple docstring""" return getitem, k def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" return setitem, k, v def lowerCamelCase__ ( lowercase ): """simple docstring""" return delitem, k def lowerCamelCase__ ( lowercase , lowercase , *lowercase ): """simple docstring""" try: return fun(__a , *__a ), None except Exception as e: return None, e snake_case = ( _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), ) snake_case = [ _set("""key_a""", """val_a"""), _set("""key_a""", """val_b"""), ] snake_case = [ _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), _del("""key_a"""), _del("""key_b"""), _set("""key_a""", """val_a"""), _del("""key_a"""), ] snake_case = [ _get("""key_a"""), _del("""key_a"""), _set("""key_a""", """val_a"""), _del("""key_a"""), _del("""key_a"""), _get("""key_a"""), ] snake_case = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] snake_case = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set("""key_a""", """val_b"""), ] @pytest.mark.parametrize( "operations" , ( pytest.param(_add_items , id="add items" ), pytest.param(_overwrite_items , id="overwrite items" ), pytest.param(_delete_items , id="delete items" ), pytest.param(_access_absent_items , id="access absent items" ), pytest.param(_add_with_resize_up , id="add with resize up" ), pytest.param(_add_with_resize_down , id="add with resize down" ), ) , ) def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : str = HashMap(initial_block_size=4 ) SCREAMING_SNAKE_CASE : Union[str, Any] = {} for _, (fun, *args) in enumerate(__a ): SCREAMING_SNAKE_CASE : List[Any] = _run_operation(__a , __a , *__a ) SCREAMING_SNAKE_CASE : List[Any] = _run_operation(__a , __a , *__a ) assert my_res == py_res assert str(__a ) == str(__a ) assert set(__a ) == set(__a ) assert len(__a ) == len(__a ) assert set(my.items() ) == set(py.items() ) def lowerCamelCase__ ( ): """simple docstring""" def is_public(lowercase ) -> bool: return not name.startswith("_" ) SCREAMING_SNAKE_CASE : List[Any] = {name for name in dir({} ) if is_public(__a )} SCREAMING_SNAKE_CASE : Tuple = {name for name in dir(HashMap() ) if is_public(__a )} assert dict_public_names > hash_public_names
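# A short usage sketch of the HashMap exercised above, assuming it implements
# the mapping protocol (__setitem__/__getitem__/__delitem__) that the
# parametrized operations exercise against a plain dict.
def demo_hash_map() -> None:
    hm = HashMap(initial_block_size=4)
    hm["key_a"] = "val_a"  # insert
    hm["key_a"] = "val_b"  # overwrite in place
    assert hm["key_a"] == "val_b"
    del hm["key_a"]  # a later hm["key_a"] now raises KeyError, like a dict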
def or_gate(input_a: int, input_b: int) -> int:
    """Calculate OR of the two inputs: 1 if at least one input is 1, else 0."""
    return int((input_a, input_b).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
"""simple docstring""" def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = generate_pascal_triangle(lowerCAmelCase__ ) for row_idx in range(lowerCAmelCase__ ): # Print left spaces for _ in range(num_rows - row_idx - 1 ): print(end=" " ) # Print row values for col_idx in range(row_idx + 1 ): if col_idx != row_idx: print(triangle[row_idx][col_idx] , end=" " ) else: print(triangle[row_idx][col_idx] , end="" ) print() def lowerCamelCase__ ( lowercase ): """simple docstring""" if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise TypeError("The input value of 'num_rows' should be 'int'" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( "The input value of 'num_rows' should be greater than or equal to 0" ) SCREAMING_SNAKE_CASE : list[list[int]] = [] for current_row_idx in range(lowerCAmelCase__ ): SCREAMING_SNAKE_CASE : Optional[int] = populate_current_row(lowerCAmelCase__ , lowerCAmelCase__ ) triangle.append(lowerCAmelCase__ ) return triangle def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = [-1] * (current_row_idx + 1) # first and last elements of current row are equal to 1 SCREAMING_SNAKE_CASE : Tuple = 1, 1 for current_col_idx in range(1 , lowerCAmelCase__ ): calculate_current_element( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) return current_row def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase , ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = triangle[current_row_idx - 1][current_col_idx - 1] SCREAMING_SNAKE_CASE : Dict = triangle[current_row_idx - 1][current_col_idx] SCREAMING_SNAKE_CASE : List[str] = above_to_left_elt + above_to_right_elt def lowerCamelCase__ ( lowercase ): """simple docstring""" if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise TypeError("The input value of 'num_rows' should be 'int'" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( "The input value of 'num_rows' should be greater than or equal to 0" ) SCREAMING_SNAKE_CASE : list[list[int]] = [[1]] for row_index in range(1 , lowerCAmelCase__ ): SCREAMING_SNAKE_CASE : Optional[int] = [0] + result[-1] + [0] SCREAMING_SNAKE_CASE : str = row_index + 1 # Calculate the number of distinct elements in a row SCREAMING_SNAKE_CASE : Dict = sum(divmod(lowerCAmelCase__ , 2 ) ) SCREAMING_SNAKE_CASE : Tuple = [ temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 ) ] SCREAMING_SNAKE_CASE : str = row_first_half[: (row_index + 1) // 2] row_second_half.reverse() SCREAMING_SNAKE_CASE : Any = row_first_half + row_second_half result.append(lowerCAmelCase__ ) return result def lowerCamelCase__ ( ): """simple docstring""" from collections.abc import Callable from timeit import timeit def benchmark_a_function(lowercase , lowercase ) -> None: SCREAMING_SNAKE_CASE : Optional[int] = F'''{func.__name__}({value})''' SCREAMING_SNAKE_CASE : Optional[Any] = timeit(F'''__main__.{call}''' , setup="import __main__" ) # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds") print(F'''{call:38} -- {timing:.4f} seconds''' ) for value in range(15 ): # (1, 7, 14): for func in (generate_pascal_triangle, generate_pascal_triangle_optimized): benchmark_a_function(lowerCAmelCase__ , lowerCAmelCase__ ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
366
class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Union[str, Any] , UpperCAmelCase_ : list ): SCREAMING_SNAKE_CASE : Union[str, Any] = set_counts SCREAMING_SNAKE_CASE : Any = max(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = len(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = [1] * num_sets SCREAMING_SNAKE_CASE : List[str] = list(range(UpperCAmelCase_ ) ) def _A ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : List[Any] = self.get_parent(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = self.get_parent(UpperCAmelCase_ ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] SCREAMING_SNAKE_CASE : Dict = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 SCREAMING_SNAKE_CASE : List[str] = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] SCREAMING_SNAKE_CASE : Optional[int] = 0 SCREAMING_SNAKE_CASE : Tuple = src_parent SCREAMING_SNAKE_CASE : Optional[int] = self.set_counts[src_parent] SCREAMING_SNAKE_CASE : Optional[Any] = max(self.max_set , UpperCAmelCase_ ) return True def _A ( self : Tuple , UpperCAmelCase_ : int ): if self.parents[disj_set] == disj_set: return disj_set SCREAMING_SNAKE_CASE : Tuple = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
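# Usage sketch for the union-find (disjoint-set) class above. Its method names
# were obfuscated to `_A` in this dump; the sketch assumes the originals were
# `merge(src, dst)` and `get_parent(disj_set)` (the first body calls
# `get_parent` and returns False when both elements already share a root), and
# `DisjointSet` stands in for the obfuscated class name:
#
#     ds = DisjointSet([1, 1, 1])          # three singleton sets of size 1
#     ds.merge(0, 1)                       # union-by-rank joins them
#     assert ds.get_parent(0) == ds.get_parent(1)
#     assert ds.max_set == 2               # size of the largest set seen so far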
class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase_ : str = "" , UpperCAmelCase_ : bool = False ): SCREAMING_SNAKE_CASE : dict[str, RadixNode] = {} # A node will be a leaf if the tree contains its word SCREAMING_SNAKE_CASE : Union[str, Any] = is_leaf SCREAMING_SNAKE_CASE : Any = prefix def _A ( self : int , UpperCAmelCase_ : str ): SCREAMING_SNAKE_CASE : Optional[int] = 0 for q, w in zip(self.prefix , lowerCAmelCase__ ): if q != w: break x += 1 return self.prefix[:x], self.prefix[x:], word[x:] def _A ( self : Dict , UpperCAmelCase_ : list[str] ): for word in words: self.insert(lowerCAmelCase__ ) def _A ( self : Dict , UpperCAmelCase_ : str ): if self.prefix == word: SCREAMING_SNAKE_CASE : Optional[int] = True # Case 2: The node has no edges that have a prefix to the word # Solution: We create an edge from the current node to a new one # containing the word elif word[0] not in self.nodes: SCREAMING_SNAKE_CASE : Dict = RadixNode(prefix=lowerCAmelCase__ , is_leaf=lowerCAmelCase__ ) else: SCREAMING_SNAKE_CASE : Dict = self.nodes[word[0]] SCREAMING_SNAKE_CASE : Optional[int] = incoming_node.match( lowerCAmelCase__ ) # Case 3: The node prefix is equal to the matching # Solution: We insert remaining word on the next node if remaining_prefix == "": self.nodes[matching_string[0]].insert(lowerCAmelCase__ ) # Case 4: The word is greater equal to the matching # Solution: Create a node in between both nodes, change # prefixes and add the new node for the remaining word else: SCREAMING_SNAKE_CASE : Optional[Any] = remaining_prefix SCREAMING_SNAKE_CASE : Optional[Any] = self.nodes[matching_string[0]] SCREAMING_SNAKE_CASE : Optional[Any] = RadixNode(lowerCAmelCase__ , lowerCAmelCase__ ) SCREAMING_SNAKE_CASE : int = aux_node if remaining_word == "": SCREAMING_SNAKE_CASE : Optional[Any] = True else: self.nodes[matching_string[0]].insert(lowerCAmelCase__ ) def _A ( self : str , UpperCAmelCase_ : str ): SCREAMING_SNAKE_CASE : int = self.nodes.get(word[0] , lowerCAmelCase__ ) if not incoming_node: return False else: SCREAMING_SNAKE_CASE : Dict = incoming_node.match( lowerCAmelCase__ ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # This applies when the word and the prefix are equal elif remaining_word == "": return incoming_node.is_leaf # We have word remaining so we check the next node else: return incoming_node.find(lowerCAmelCase__ ) def _A ( self : List[Any] , UpperCAmelCase_ : str ): SCREAMING_SNAKE_CASE : Optional[int] = self.nodes.get(word[0] , lowerCAmelCase__ ) if not incoming_node: return False else: SCREAMING_SNAKE_CASE : Dict = incoming_node.match( lowerCAmelCase__ ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # We have word remaining so we check the next node elif remaining_word != "": return incoming_node.delete(lowerCAmelCase__ ) else: # If it is not a leaf, we don't have to delete if not incoming_node.is_leaf: return False else: # We delete the nodes if no edges go from it if len(incoming_node.nodes ) == 0: del self.nodes[word[0]] # We merge the current node with its only child if len(self.nodes ) == 1 and not self.is_leaf: SCREAMING_SNAKE_CASE : Optional[int] = list(self.nodes.values() )[0] SCREAMING_SNAKE_CASE : Union[str, Any] = merging_node.is_leaf self.prefix += merging_node.prefix SCREAMING_SNAKE_CASE : Any = merging_node.nodes # If there is more than 1 edge, we just mark it as non-leaf elif len(incoming_node.nodes ) > 1: 
SCREAMING_SNAKE_CASE : int = False # If there is 1 edge, we merge it with its child else: SCREAMING_SNAKE_CASE : str = list(incoming_node.nodes.values() )[0] SCREAMING_SNAKE_CASE : str = merging_node.is_leaf incoming_node.prefix += merging_node.prefix SCREAMING_SNAKE_CASE : Optional[int] = merging_node.nodes return True def _A ( self : Any , UpperCAmelCase_ : int = 0 ): if self.prefix != "": print("-" * height , self.prefix , " (leaf)" if self.is_leaf else "" ) for value in self.nodes.values(): value.print_tree(height + 1 ) def lowerCamelCase__ ( ): SCREAMING_SNAKE_CASE : List[Any] = "banana bananas bandana band apple all beast".split() SCREAMING_SNAKE_CASE : Union[str, Any] = RadixNode() root.insert_many(a_ ) assert all(root.find(a_ ) for word in words ) assert not root.find("bandanas" ) assert not root.find("apps" ) root.delete("all" ) assert not root.find("all" ) root.delete("banana" ) assert not root.find("banana" ) assert root.find("bananas" ) return True def lowerCamelCase__ ( ): assert test_trie() def lowerCamelCase__ ( ): SCREAMING_SNAKE_CASE : Dict = RadixNode() SCREAMING_SNAKE_CASE : Union[str, Any] = "banana bananas bandanas bandana band apple all beast".split() root.insert_many(a_ ) print("Words:" , a_ ) print("Tree:" ) root.print_tree() if __name__ == "__main__": main()
from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Dict = '''timm_backbone''' def __init__( self : List[Any] , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : Optional[Any] , ): super().__init__(**UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Dict = backbone SCREAMING_SNAKE_CASE : List[str] = num_channels SCREAMING_SNAKE_CASE : Optional[Any] = features_only SCREAMING_SNAKE_CASE : Dict = use_pretrained_backbone SCREAMING_SNAKE_CASE : Optional[int] = True SCREAMING_SNAKE_CASE : List[Any] = out_indices if out_indices is not None else (-1,)
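# Minimal construction sketch for the config class above (registered as
# model_type "timm_backbone"); keyword names follow the __init__ signature and
# the backbone string is illustrative. `TimmBackboneConfig` stands in for the
# obfuscated class name:
#
#     config = TimmBackboneConfig(
#         backbone="resnet50", num_channels=3, features_only=True
#     )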
from __future__ import annotations snake_case = list[tuple[int, int]] snake_case = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] snake_case = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , ): SCREAMING_SNAKE_CASE : Optional[int] = pos_x SCREAMING_SNAKE_CASE : Union[str, Any] = pos_y SCREAMING_SNAKE_CASE : Union[str, Any] = (pos_y, pos_x) SCREAMING_SNAKE_CASE : Any = goal_x SCREAMING_SNAKE_CASE : List[str] = goal_y SCREAMING_SNAKE_CASE : Any = g_cost SCREAMING_SNAKE_CASE : Optional[int] = parent SCREAMING_SNAKE_CASE : Union[str, Any] = self.calculate_heuristic() def _A ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE : Optional[int] = abs(self.pos_x - self.goal_x ) SCREAMING_SNAKE_CASE : List[str] = abs(self.pos_y - self.goal_y ) return dx + dy def __lt__( self : int , UpperCAmelCase_ : Dict ): return self.f_cost < other.f_cost class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : Optional[int] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Optional[int] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Tuple = [self.start] SCREAMING_SNAKE_CASE : Optional[int] = [] SCREAMING_SNAKE_CASE : Tuple = False def _A ( self : List[str] ): while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() SCREAMING_SNAKE_CASE : int = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: SCREAMING_SNAKE_CASE : Optional[int] = True return self.retrace_path(_SCREAMING_SNAKE_CASE ) self.closed_nodes.append(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : List[str] = self.get_successors(_SCREAMING_SNAKE_CASE ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(_SCREAMING_SNAKE_CASE ) else: # retrieve the best current path SCREAMING_SNAKE_CASE : Tuple = self.open_nodes.pop(self.open_nodes.index(_SCREAMING_SNAKE_CASE ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(_SCREAMING_SNAKE_CASE ) else: self.open_nodes.append(_SCREAMING_SNAKE_CASE ) if not self.reached: return [self.start.pos] return None def _A ( self : Dict , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE : Any = [] for action in delta: SCREAMING_SNAKE_CASE : Union[str, Any] = parent.pos_x + action[1] SCREAMING_SNAKE_CASE : Optional[int] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_SCREAMING_SNAKE_CASE ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _SCREAMING_SNAKE_CASE , ) ) return successors def _A ( self : Any , UpperCAmelCase_ : List[str] ): SCREAMING_SNAKE_CASE : List[Any] = node SCREAMING_SNAKE_CASE : Tuple = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) SCREAMING_SNAKE_CASE : List[Any] = current_node.parent path.reverse() return 
path if __name__ == "__main__": snake_case = (0, 0) snake_case = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) print("""------""") snake_case = GreedyBestFirst(init, goal) snake_case = greedy_bf.search() if path: for pos_x, pos_y in path: snake_case = 2 for elem in grid: print(elem)
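# Note on the search above: the open list is ordered purely by the Manhattan
# heuristic h = |pos_x - goal_x| + |pos_y - goal_y| (g_cost is tracked but
# never enters the __lt__ comparison), which is what makes this greedy
# best-first search rather than A*: fast, but with no shortest-path guarantee.
# For example, a node at (0, 0) with the goal at (6, 6) has h = 6 + 6 = 12.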
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Return the sum of all amicable numbers under `limit`."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
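# Worked example: (220, 284) is the classic amicable pair counted by
# solution(). The proper divisors of 220 sum to 284 and those of 284 sum back
# to 220, so sum_of_divisors(sum_of_divisors(220)) == 220 while
# sum_of_divisors(220) != 220; that second check is what excludes perfect
# numbers, which are their own divisor sum.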
import copy import random from transformers import CLIPTokenizer class SCREAMING_SNAKE_CASE ( a__ ): '''simple docstring''' def __init__( self : str , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any] ): super().__init__(*_lowerCamelCase , **_lowerCamelCase ) SCREAMING_SNAKE_CASE : Any = {} def _A ( self : List[Any] , UpperCAmelCase_ : List[str] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Dict ): SCREAMING_SNAKE_CASE : int = super().add_tokens(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) if num_added_tokens == 0: raise ValueError( f'''The tokenizer already contains the token {placeholder_token}. Please pass a different''' " `placeholder_token` that is not already in the tokenizer." ) def _A ( self : str , UpperCAmelCase_ : List[Any] , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int=1 , **UpperCAmelCase_ : List[str] ): SCREAMING_SNAKE_CASE : Dict = [] if num_vec_per_token == 1: self.try_adding_tokens(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) output.append(_lowerCamelCase ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = [] for i in range(_lowerCamelCase ): SCREAMING_SNAKE_CASE : Optional[int] = placeholder_token + f'''_{i}''' self.try_adding_tokens(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) output.append(_lowerCamelCase ) # handle cases where there is a new placeholder token that contains the current placeholder token but is larger for token in self.token_map: if token in placeholder_token: raise ValueError( f'''The tokenizer already has placeholder token {token} that can get confused with''' f''' {placeholder_token}keep placeholder tokens independent''' ) SCREAMING_SNAKE_CASE : int = output def _A ( self : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Tuple=1.0 ): if isinstance(_lowerCamelCase , _lowerCamelCase ): SCREAMING_SNAKE_CASE : List[Any] = [] for i in range(len(_lowerCamelCase ) ): output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=_lowerCamelCase ) ) return output for placeholder_token in self.token_map: if placeholder_token in text: SCREAMING_SNAKE_CASE : str = self.token_map[placeholder_token] SCREAMING_SNAKE_CASE : int = tokens[: 1 + int(len(_lowerCamelCase ) * prop_tokens_to_load )] if vector_shuffle: SCREAMING_SNAKE_CASE : Tuple = copy.copy(_lowerCamelCase ) random.shuffle(_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = text.replace(_lowerCamelCase , " ".join(_lowerCamelCase ) ) return text def __call__( self : int , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Tuple=1.0 , **UpperCAmelCase_ : int ): return super().__call__( self.replace_placeholder_tokens_in_text( _lowerCamelCase , vector_shuffle=_lowerCamelCase , prop_tokens_to_load=_lowerCamelCase ) , *_lowerCamelCase , **_lowerCamelCase , ) def _A ( self : List[str] , UpperCAmelCase_ : Any , *UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : Optional[Any]=1.0 , **UpperCAmelCase_ : Any ): return super().encode( self.replace_placeholder_tokens_in_text( _lowerCamelCase , vector_shuffle=_lowerCamelCase , prop_tokens_to_load=_lowerCamelCase ) , *_lowerCamelCase , **_lowerCamelCase , )
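# Usage sketch for the multi-vector tokenizer above (a CLIPTokenizer subclass
# used for textual inversion). Its public method names are obfuscated to `_A`
# in this dump; the sketch assumes the originals were `add_placeholder_tokens`
# and `replace_placeholder_tokens_in_text`, uses `MultiTokenCLIPTokenizer` as
# a stand-in class name, and the checkpoint string is illustrative:
#
#     tokenizer = MultiTokenCLIPTokenizer.from_pretrained(
#         "openai/clip-vit-base-patch32"
#     )
#     tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#     # "<cat-toy>" now expands to "<cat-toy>_0 ... <cat-toy>_3" before encoding
#     ids = tokenizer("a photo of <cat-toy>", vector_shuffle=True)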
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) snake_case = { """configuration_encodec""": [ """ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP""", """EncodecConfig""", ], """feature_extraction_encodec""": ["""EncodecFeatureExtractor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST""", """EncodecModel""", """EncodecPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() snake_case = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) snake_case = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""")) rename_keys.append( (F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append( ( F"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""", F"""decoder.layers.{i}.encoder_attn.out_proj.weight""", ) ) rename_keys.append( ( F"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""", F"""decoder.layers.{i}.encoder_attn.out_proj.bias""", ) ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) 
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""")) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", F"""decoder.layers.{i}.sa_qcontent_proj.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", F"""decoder.layers.{i}.sa_kcontent_proj.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", F"""decoder.layers.{i}.sa_qpos_proj.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", F"""decoder.layers.{i}.sa_kpos_proj.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.weight""", F"""decoder.layers.{i}.sa_v_proj.weight""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", F"""decoder.layers.{i}.ca_qcontent_proj.weight""") ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", F"""decoder.layers.{i}.ca_kcontent_proj.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", F"""decoder.layers.{i}.ca_kpos_proj.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.weight""", F"""decoder.layers.{i}.ca_v_proj.weight""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", F"""decoder.layers.{i}.ca_qpos_sine_proj.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", F"""decoder.layers.{i}.sa_qcontent_proj.bias""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", F"""decoder.layers.{i}.sa_kcontent_proj.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", F"""decoder.layers.{i}.sa_qpos_proj.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", F"""decoder.layers.{i}.sa_kpos_proj.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.bias""", F"""decoder.layers.{i}.sa_v_proj.bias""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", F"""decoder.layers.{i}.ca_qcontent_proj.bias""") ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", F"""decoder.layers.{i}.ca_kcontent_proj.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", F"""decoder.layers.{i}.ca_kpos_proj.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.bias""", F"""decoder.layers.{i}.ca_v_proj.bias""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", F"""decoder.layers.{i}.ca_qpos_sine_proj.bias""") ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), 
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""), ("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""), ("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""), ("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""), ("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""), ("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""), ("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""), ("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""), ("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""), ("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""), ] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Optional[int] = val def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: SCREAMING_SNAKE_CASE : List[Any] = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) SCREAMING_SNAKE_CASE : List[Any] = value else: SCREAMING_SNAKE_CASE : Tuple = value return new_state_dict def lowerCamelCase__ ( lowercase , lowercase=False ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = """""" if is_panoptic: SCREAMING_SNAKE_CASE : Any = """conditional_detr.""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) SCREAMING_SNAKE_CASE : str = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_weight[:256, :] SCREAMING_SNAKE_CASE : List[Any] = in_proj_bias[:256] SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[256:512, :] SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_bias[256:512] SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[-256:, :] SCREAMING_SNAKE_CASE : Tuple = in_proj_bias[-256:] def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , 
stream=__SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: SCREAMING_SNAKE_CASE : List[str] = """resnet101""" if "dc5" in model_name: SCREAMING_SNAKE_CASE : List[Any] = True SCREAMING_SNAKE_CASE : List[Any] = """panoptic""" in model_name if is_panoptic: SCREAMING_SNAKE_CASE : List[Any] = 250 else: SCREAMING_SNAKE_CASE : Tuple = 91 SCREAMING_SNAKE_CASE : int = """huggingface/label-files""" SCREAMING_SNAKE_CASE : Tuple = """coco-detection-id2label.json""" SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : List[str] = idalabel SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in idalabel.items()} # load image processor SCREAMING_SNAKE_CASE : List[Any] = """coco_panoptic""" if is_panoptic else """coco_detection""" SCREAMING_SNAKE_CASE : int = ConditionalDetrImageProcessor(format=__SCREAMING_SNAKE_CASE ) # prepare image SCREAMING_SNAKE_CASE : str = prepare_img() SCREAMING_SNAKE_CASE : List[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="pt" ) SCREAMING_SNAKE_CASE : List[Any] = encoding["""pixel_values"""] logger.info(F'''Converting model {model_name}...''' ) # load original model from torch hub SCREAMING_SNAKE_CASE : Optional[Any] = torch.hub.load("DeppMeng/ConditionalDETR" , __SCREAMING_SNAKE_CASE , pretrained=__SCREAMING_SNAKE_CASE ).eval() SCREAMING_SNAKE_CASE : Dict = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: SCREAMING_SNAKE_CASE : List[str] = """conditional_detr.""" + src rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : List[str] = rename_backbone_keys(__SCREAMING_SNAKE_CASE ) # query, key and value matrices need special treatment read_in_q_k_v(__SCREAMING_SNAKE_CASE , is_panoptic=__SCREAMING_SNAKE_CASE ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them SCREAMING_SNAKE_CASE : Dict = """conditional_detr.model.""" if is_panoptic else """model.""" for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("conditional_detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Optional[int] = val elif "class_labels_classifier" in key or "bbox_predictor" in key: SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : int = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: SCREAMING_SNAKE_CASE : Any = state_dict.pop(__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : str = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): SCREAMING_SNAKE_CASE : Tuple = state_dict.pop(__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Dict = val # finally, create HuggingFace model and load state dict SCREAMING_SNAKE_CASE : Optional[Any] = ConditionalDetrForSegmentation(__SCREAMING_SNAKE_CASE ) if is_panoptic else ConditionalDetrForObjectDetection(__SCREAMING_SNAKE_CASE ) 
model.load_state_dict(__SCREAMING_SNAKE_CASE ) model.eval() model.push_to_hub(repo_id=__SCREAMING_SNAKE_CASE , organization="DepuMeng" , commit_message="Add model" ) # verify our conversion SCREAMING_SNAKE_CASE : Union[str, Any] = conditional_detr(__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Tuple = model(__SCREAMING_SNAKE_CASE ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 ) # Save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() parser.add_argument( """--model_name""", default="""conditional_detr_resnet50""", type=str, help="""Name of the CONDITIONAL_DETR model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) snake_case = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
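# Example invocation of the conversion script above (the script filename and
# output path are illustrative, not fixed by the code):
#
#     python convert_conditional_detr_checkpoint.py \
#         --model_name conditional_detr_resnet50 \
#         --pytorch_dump_folder_path ./conditional_detr_resnet50
#
# --model_name must match a torch.hub entry point of DeppMeng/ConditionalDETR;
# the "resnet101", "dc5" and "panoptic" substrings in the name toggle the
# config branches handled above.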
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: snake_case = None snake_case = logging.get_logger(__name__) snake_case = """▁""" snake_case = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} snake_case = { """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}, """tokenizer_file""": { """google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json""" }, } snake_case = { """google/pegasus-xsum""": 512, } class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES UpperCamelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int = PegasusTokenizer UpperCamelCase_ : str = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Optional[int]="<pad>" , UpperCAmelCase_ : int="</s>" , UpperCAmelCase_ : str="<unk>" , UpperCAmelCase_ : str="<mask_2>" , UpperCAmelCase_ : Optional[int]="<mask_1>" , UpperCAmelCase_ : int=None , UpperCAmelCase_ : str=103 , **UpperCAmelCase_ : Optional[int] , ): SCREAMING_SNAKE_CASE : Optional[Any] = offset if additional_special_tokens is not None: if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): raise TypeError( f'''additional_special_tokens should be of type {type(UpperCAmelCase_ )}, but is''' f''' {type(UpperCAmelCase_ )}''' ) SCREAMING_SNAKE_CASE : Optional[Any] = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'''<unk_{i}>''' for i in range(len(UpperCAmelCase_ ) , self.offset - 1 ) ] if len(set(UpperCAmelCase_ ) ) != len(UpperCAmelCase_ ): raise ValueError( "Please make sure that the provided additional_special_tokens do not contain an incorrectly" f''' shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.''' ) SCREAMING_SNAKE_CASE : int = additional_special_tokens_extended else: SCREAMING_SNAKE_CASE : Tuple = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )] super().__init__( UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , mask_token_sent=UpperCAmelCase_ , offset=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : str = vocab_file SCREAMING_SNAKE_CASE : str = False if not self.vocab_file else True def _A ( self : Optional[Any] , UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : Optional[int] = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( "There should be 3 special tokens: mask_token, pad_token, and eos_token +" f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' ) return [1 if x in all_special_ids else 0 for x in seq] def _A ( self : int , UpperCAmelCase_ : List , UpperCAmelCase_ : Optional[List] = None , UpperCAmelCase_ : bool = False ): if already_has_special_tokens: return self._special_token_mask(UpperCAmelCase_ ) elif token_ids_a is None: return self._special_token_mask(UpperCAmelCase_ ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def _A ( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any=None ): if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _A ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(UpperCAmelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE : List[str] = os.path.join( UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ): copyfile(self.vocab_file , UpperCAmelCase_ ) return (out_vocab_file,)
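# Minimal usage sketch for the fast tokenizer above (PegasusTokenizerFast in
# the upstream library; the checkpoint name comes from this module's
# PRETRAINED_VOCAB_FILES_MAP):
#
#     tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#     ids = tok("PEGASUS uses gap-sentence pretraining.").input_ids
#     assert ids[-1] == tok.eos_token_id  # only </s> is appended; no BOS token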
from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler from .memory 
import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available snake_case = {"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = ["""SpeechEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = ["""FlaxSpeechEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import List from unittest.mock import Mock import torch from torch.utils.data import DataLoader, IterableDataset, TensorDataset from accelerate.accelerator import Accelerator from accelerate.utils.dataclasses import DistributedType class SCREAMING_SNAKE_CASE ( __lowercase ): '''simple docstring''' def __init__( self : str , UpperCAmelCase_ : Union[str, Any] ): SCREAMING_SNAKE_CASE : List[Any] = data def __iter__( self : int ): for element in self.data: yield element def lowerCamelCase__ ( lowercase=True ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = Accelerator(even_batches=__a ) assert accelerator.num_processes == 2, "this script expects that two GPUs are available" return accelerator def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase = False ): """simple docstring""" if iterable: SCREAMING_SNAKE_CASE : Tuple = DummyIterableDataset(torch.as_tensor(range(__a ) ) ) else: SCREAMING_SNAKE_CASE : str = TensorDataset(torch.as_tensor(range(__a ) ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = DataLoader(__a , batch_size=__a ) SCREAMING_SNAKE_CASE : int = accelerator.prepare(__a ) return dl def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase , lowercase , ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = create_dataloader(accelerator=__a , dataset_size=__a , batch_size=__a ) SCREAMING_SNAKE_CASE : Optional[int] = [len(batch[0] ) for batch in dl] if accelerator.process_index == 0: assert batch_sizes == process_0_expected_batch_sizes elif accelerator.process_index == 1: assert batch_sizes == process_1_expected_batch_sizes def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : str = create_accelerator() # without padding, we would expect a different number of batches verify_dataloader_batch_sizes( __a , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , ) # without padding, we would expect the same number of batches, but different sizes verify_dataloader_batch_sizes( __a , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , ) def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = create_accelerator(even_batches=__a ) verify_dataloader_batch_sizes( __a , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , ) verify_dataloader_batch_sizes( __a , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , ) def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : int = create_accelerator(even_batches=__a ) SCREAMING_SNAKE_CASE : List[Any] = torch.nn.Linear(1 , 1 ) SCREAMING_SNAKE_CASE : List[Any] = accelerator.prepare(__a ) SCREAMING_SNAKE_CASE : List[str] = create_dataloader(__a , dataset_size=3 , batch_size=1 ) SCREAMING_SNAKE_CASE : Union[str, Any] = [] 
with accelerator.join_uneven_inputs([ddp_model] ): for batch_idx, batch in enumerate(__a ): SCREAMING_SNAKE_CASE : Union[str, Any] = ddp_model(batch[0].float() ) SCREAMING_SNAKE_CASE : Optional[int] = output.sum() loss.backward() batch_idxs.append(__a ) accelerator.wait_for_everyone() if accelerator.process_index == 0: assert batch_idxs == [0, 1] elif accelerator.process_index == 1: assert batch_idxs == [0] def lowerCamelCase__ ( lowercase ): """simple docstring""" with warnings.catch_warnings(record=__a ) as w: with accelerator.join_uneven_inputs([Mock()] ): pass assert issubclass(w[-1].category , __a ) assert "only supported for multi-GPU" in str(w[-1].message ) def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = True SCREAMING_SNAKE_CASE : Tuple = False SCREAMING_SNAKE_CASE : Union[str, Any] = create_accelerator(even_batches=__a ) SCREAMING_SNAKE_CASE : List[str] = torch.nn.Linear(1 , 1 ) SCREAMING_SNAKE_CASE : List[Any] = accelerator.prepare(__a ) SCREAMING_SNAKE_CASE : Optional[int] = create_dataloader(__a , dataset_size=3 , batch_size=1 ) SCREAMING_SNAKE_CASE : Dict = create_dataloader(__a , dataset_size=3 , batch_size=1 ) with accelerator.join_uneven_inputs([ddp_model] , even_batches=__a ): SCREAMING_SNAKE_CASE : Optional[Any] = train_dl.batch_sampler.even_batches SCREAMING_SNAKE_CASE : List[Any] = valid_dl.batch_sampler.even_batches assert train_dl_overridden_value == overridden_even_batches assert valid_dl_overridden_value == overridden_even_batches assert train_dl.batch_sampler.even_batches == default_even_batches assert valid_dl.batch_sampler.even_batches == default_even_batches def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = True SCREAMING_SNAKE_CASE : int = False SCREAMING_SNAKE_CASE : Any = create_accelerator(even_batches=__a ) SCREAMING_SNAKE_CASE : List[str] = torch.nn.Linear(1 , 1 ) SCREAMING_SNAKE_CASE : Dict = accelerator.prepare(__a ) create_dataloader(__a , dataset_size=3 , batch_size=1 , iterable=__a ) SCREAMING_SNAKE_CASE : Optional[int] = create_dataloader(__a , dataset_size=3 , batch_size=1 ) with warnings.catch_warnings(): warnings.filterwarnings("ignore" ) try: with accelerator.join_uneven_inputs([ddp_model] , even_batches=__a ): SCREAMING_SNAKE_CASE : Any = batch_dl.batch_sampler.even_batches except AttributeError: # ensure attribute error is not raised when processing iterable dl raise AssertionError assert batch_dl_overridden_value == overridden_even_batches assert batch_dl.batch_sampler.even_batches == default_even_batches def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = create_accelerator() SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.Linear(1 , 1 ) SCREAMING_SNAKE_CASE : Any = accelerator.prepare(__a ) create_dataloader(__a , dataset_size=3 , batch_size=1 , iterable=__a ) with warnings.catch_warnings(record=__a ) as w: with accelerator.join_uneven_inputs([ddp_model] , even_batches=__a ): pass assert issubclass(w[-1].category , __a ) assert "only supported for map-style datasets" in str(w[-1].message ) def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = create_accelerator() accelerator.print("Test that even_batches variable ensures uniform batches across processes" ) test_default_ensures_even_batch_sizes() accelerator.print("Run tests with even_batches disabled" ) test_can_disable_even_batches() accelerator.print("Test joining uneven inputs" ) test_can_join_uneven_inputs() accelerator.print("Test overriding even_batches when 
joining uneven inputs" ) test_join_can_override_even_batches() accelerator.print("Test overriding even_batches for mixed dataloader types" ) test_join_can_override_for_mixed_type_dataloaders() accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" ) test_join_raises_warning_for_iterable_when_overriding_even_batches() accelerator.print("Test join with non DDP distributed raises warning" ) SCREAMING_SNAKE_CASE : List[Any] = accelerator.state.distributed_type SCREAMING_SNAKE_CASE : Optional[int] = DistributedType.FSDP test_join_raises_warning_for_non_ddp_distributed(__a ) SCREAMING_SNAKE_CASE : Any = original_state if __name__ == "__main__": main()
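# The behaviour exercised above, in short: with even_batches=False and two
# processes, a 3-item dataset at batch_size=1 gives rank 0 the batches [1, 1]
# and rank 1 only [1]; wrapping the loop in
# accelerator.join_uneven_inputs([ddp_model]) lets rank 0 take its extra step
# without deadlocking rank 1's gradient synchronisation. The even_batches
# override passed to the context manager applies only to map-style
# dataloaders; iterable ones merely trigger a warning.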
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## snake_case = 16 snake_case = 32 def lowerCamelCase__ ( lowercase , lowercase = 16 ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained("bert-base-cased" ) SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("glue" , "mrpc" ) def tokenize_function(lowercase ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowercase , max_length=lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): SCREAMING_SNAKE_CASE : List[Any] = datasets.map( lowercase , batched=lowercase , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library SCREAMING_SNAKE_CASE : Tuple = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. SCREAMING_SNAKE_CASE : Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": SCREAMING_SNAKE_CASE : str = 16 elif accelerator.mixed_precision != "no": SCREAMING_SNAKE_CASE : Optional[Any] = 8 else: SCREAMING_SNAKE_CASE : Union[str, Any] = None return tokenizer.pad( lowercase , padding="longest" , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors="pt" , ) # Instantiate dataloaders. 
SCREAMING_SNAKE_CASE : Optional[int] = DataLoader( tokenized_datasets["train"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase ) SCREAMING_SNAKE_CASE : Dict = DataLoader( tokenized_datasets["validation"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders snake_case = mocked_dataloaders # noqa: F811 def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowercase ) == "1": SCREAMING_SNAKE_CASE : int = 2 # New Code # SCREAMING_SNAKE_CASE : Union[str, Any] = int(args.gradient_accumulation_steps ) # Initialize accelerator SCREAMING_SNAKE_CASE : Tuple = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs SCREAMING_SNAKE_CASE : Any = config["lr"] SCREAMING_SNAKE_CASE : Optional[Any] = int(config["num_epochs"] ) SCREAMING_SNAKE_CASE : List[Any] = int(config["seed"] ) SCREAMING_SNAKE_CASE : Union[str, Any] = int(config["batch_size"] ) SCREAMING_SNAKE_CASE : Optional[Any] = evaluate.load("glue" , "mrpc" ) set_seed(lowercase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = get_dataloaders(lowercase , lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) SCREAMING_SNAKE_CASE : List[Any] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowercase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). SCREAMING_SNAKE_CASE : Any = model.to(accelerator.device ) # Instantiate optimizer SCREAMING_SNAKE_CASE : Any = AdamW(params=model.parameters() , lr=lowercase ) # Instantiate scheduler SCREAMING_SNAKE_CASE : Union[str, Any] = get_linear_schedule_with_warmup( optimizer=lowercase , num_warmup_steps=100 , num_training_steps=(len(lowercase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = accelerator.prepare( lowercase , lowercase , lowercase , lowercase , lowercase ) # Now we train the model for epoch in range(lowercase ): model.train() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(lowercase ): SCREAMING_SNAKE_CASE : Any = model(**lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = output.loss accelerator.backward(lowercase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE : List[Any] = model(**lowercase ) SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits.argmax(dim=-1 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=lowercase , references=lowercase , ) SCREAMING_SNAKE_CASE : Tuple = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , lowercase ) def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=lowercase , default=lowercase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) # New Code # parser.add_argument( "--gradient_accumulation_steps" , type=lowercase , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) SCREAMING_SNAKE_CASE : List[str] = parser.parse_args() SCREAMING_SNAKE_CASE : Dict = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(lowercase , lowercase ) if __name__ == "__main__": main()
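As a compact reference for the `accumulate` context manager used in the training loop above, a self-contained sketch with toy data; the model, optimizer, and step count are invented for illustration:

import torch
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=4)
model = torch.nn.Linear(10, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model, optimizer = accelerator.prepare(model, optimizer)

for step in range(8):
    inputs = torch.randn(2, 10, device=accelerator.device)
    with accelerator.accumulate(model):
        loss = model(inputs).sum()
        accelerator.backward(loss)
        # Inside accumulate(), gradient sync and the effective optimizer step
        # only happen once every gradient_accumulation_steps iterations; the
        # wrapper skips the intermediate step()/zero_grad() calls.
        optimizer.step()
        optimizer.zero_grad()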
319
0
import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _A ( self : Tuple ): SCREAMING_SNAKE_CASE : str = torch.nn.Linear(10 , 10 ) SCREAMING_SNAKE_CASE : List[str] = torch.optim.SGD(model.parameters() , 0.1 ) SCREAMING_SNAKE_CASE : Optional[Any] = Accelerator() SCREAMING_SNAKE_CASE : Tuple = accelerator.prepare(lowercase_ ) try: pickle.loads(pickle.dumps(lowercase_ ) ) except Exception as e: self.fail(f'''Accelerated optimizer pickling failed with {e}''' ) AcceleratorState._reset_state()
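The test above pins down that a prepared (wrapped) optimizer survives pickling; the same check in script form, a minimal sketch with an invented toy model rather than part of the suite:

import pickle

import torch
from accelerate import Accelerator

accelerator = Accelerator(cpu=True)
model = torch.nn.Linear(10, 10)
optimizer = accelerator.prepare(torch.optim.SGD(model.parameters(), lr=0.1))
restored = pickle.loads(pickle.dumps(optimizer))  # round-trips without error
print(type(restored).__name__)  # the accelerate optimizer wrapper class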
351
import functools def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" if not isinstance(lowercase , lowercase ) or not all(isinstance(lowercase , lowercase ) for day in days ): raise ValueError("The parameter days should be a list of integers" ) if len(lowercase ) != 3 or not all(isinstance(lowercase , lowercase ) for cost in costs ): raise ValueError("The parameter costs should be a list of three integers" ) if len(lowercase ) == 0: return 0 if min(lowercase ) <= 0: raise ValueError("All days elements should be greater than 0" ) if max(lowercase ) >= 366: raise ValueError("All days elements should be less than 366" ) SCREAMING_SNAKE_CASE : Dict = set(lowercase ) @functools.cache def dynamic_programming(lowercase ) -> int: if index > 365: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
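The memoized recursion above is the classic minimum-cost-tickets problem; a readable restatement with a worked example follows. The function name and signature here are assumptions, since the original identifiers are obfuscated:

import functools

def minimum_tickets_cost(days: list, costs: list) -> int:
    days_set = set(days)

    @functools.cache
    def dp(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dp(index + 1)
        # Buy a 1-day, 7-day, or 30-day pass and jump past the days it covers.
        return min(
            costs[0] + dp(index + 1),
            costs[1] + dp(index + 7),
            costs[2] + dp(index + 30),
        )

    return dp(1)

# Travel days 1, 4, 6, 7, 8, 20 with pass prices (2, 7, 15):
# two 1-day passes plus one 7-day pass = 11.
assert minimum_tickets_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11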
319
0
"""simple docstring""" import argparse import os import re snake_case = """src/transformers/models/auto""" # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict snake_case = re.compile(r"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""") # re pattern that matches identifiers in mappings snake_case = re.compile(r"""\s*\(\s*\"(\S[^\"]+)\"""") def lowerCamelCase__ ( lowercase , lowercase = False ): """simple docstring""" with open(_snake_case , "r" , encoding="utf-8" ) as f: SCREAMING_SNAKE_CASE : str = f.read() SCREAMING_SNAKE_CASE : List[Any] = content.split("\n" ) SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : List[Any] = 0 while line_idx < len(_snake_case ): if _re_intro_mapping.search(lines[line_idx] ) is not None: SCREAMING_SNAKE_CASE : List[str] = len(re.search(R"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(" " * indent + "(" ): new_lines.append(lines[line_idx] ) line_idx += 1 SCREAMING_SNAKE_CASE : str = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": SCREAMING_SNAKE_CASE : Optional[Any] = line_idx while not lines[line_idx].startswith(" " * indent + ")" ): line_idx += 1 blocks.append("\n".join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers SCREAMING_SNAKE_CASE : Optional[int] = sorted(_snake_case , key=lambda lowercase : _re_identifier.search(_snake_case ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(_snake_case , "w" , encoding="utf-8" ) as f: f.write("\n".join(_snake_case ) ) elif "\n".join(_snake_case ) != content: return True def lowerCamelCase__ ( lowercase = False ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = [os.path.join(_snake_case , _snake_case ) for f in os.listdir(_snake_case ) if f.endswith(".py" )] SCREAMING_SNAKE_CASE : Optional[Any] = [sort_auto_mapping(_snake_case , overwrite=_snake_case ) for fname in fnames] if not overwrite and any(_snake_case ): SCREAMING_SNAKE_CASE : List[str] = [f for f, d in zip(_snake_case , _snake_case ) if d] raise ValueError( F'''The following files have auto mappings that need sorting: {', '.join(_snake_case )}. Run `make style` to fix''' " this." ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") snake_case = parser.parse_args() sort_all_auto_mappings(not args.check_only)
352
def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = round(n ** (1 / 3)) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(27)) print(perfect_cube(4))
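Rounding the root matters here because floating-point cube roots are inexact; without it, even exact cubes fail the equality check. A quick demonstration (the printed value is typical of IEEE-754 doubles and may vary by platform):

print(27 ** (1 / 3))                     # typically 3.0000000000000004, not 3.0
print(round(27 ** (1 / 3)) ** 3 == 27)   # True once the root is rounded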
319
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available, is_vision_available, ) snake_case = {"""configuration_beit""": ["""BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BeitConfig""", """BeitOnnxConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = ["""BeitFeatureExtractor"""] snake_case = ["""BeitImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BeitForImageClassification""", """BeitForMaskedImageModeling""", """BeitForSemanticSegmentation""", """BeitModel""", """BeitPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """FlaxBeitForImageClassification""", """FlaxBeitForMaskedImageModeling""", """FlaxBeitModel""", """FlaxBeitPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_beit import BeitFeatureExtractor from .image_processing_beit import BeitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_beit import ( BEIT_PRETRAINED_MODEL_ARCHIVE_LIST, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, BeitPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_beit import ( FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel, FlaxBeitPreTrainedModel, ) else: import sys snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
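The `_LazyModule` wiring above defers the heavy submodule imports until an attribute is first touched. A generic sketch of the underlying idea using module-level `__getattr__` (PEP 562) follows; this is an illustration of the pattern, not transformers' actual implementation:

# my_package/__init__.py -- illustrative lazy-import pattern (submodule
# names are assumptions for the sketch).
import importlib

_LAZY_ATTRS = {
    "BeitConfig": ".configuration_beit",
    "BeitModel": ".modeling_beit",
}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")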
353
import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() snake_case = logging.get_logger(__name__) snake_case = [ ("""bert.bert""", """visual_bert"""), ("""bert.cls""", """cls"""), ("""bert.classifier""", """cls"""), ("""token_type_embeddings_visual""", """visual_token_type_embeddings"""), ("""position_embeddings_visual""", """visual_position_embeddings"""), ("""projection""", """visual_projection"""), ] snake_case = [ """nlvr2_coco_pre_trained.th""", """nlvr2_fine_tuned.th""", """nlvr2_pre_trained.th""", """vcr_coco_pre_train.th""", """vcr_fine_tune.th""", """vcr_pre_train.th""", """vqa_coco_pre_trained.th""", """vqa_fine_tuned.th""", """vqa_pre_trained.th""", ] def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : int = torch.load(lowercase , map_location="cpu" ) return sd def lowerCamelCase__ ( lowercase , lowercase , lowercase=rename_keys_prefix ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = OrderedDict() SCREAMING_SNAKE_CASE : Union[str, Any] = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue SCREAMING_SNAKE_CASE : Optional[Any] = key for name_pair in rename_keys_prefix: SCREAMING_SNAKE_CASE : Tuple = new_key.replace(name_pair[0] , name_pair[1] ) SCREAMING_SNAKE_CASE : Union[str, Any] = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately SCREAMING_SNAKE_CASE : Union[str, Any] = new_d["cls.predictions.bias"] return new_d @torch.no_grad() def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" assert ( checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.''' # Get Config if "pre" in checkpoint_path: SCREAMING_SNAKE_CASE : str = "pretraining" if "vcr" in checkpoint_path: SCREAMING_SNAKE_CASE : str = {"visual_embedding_dim": 512} elif "vqa_advanced" in checkpoint_path: SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 2048} elif "vqa" in checkpoint_path: SCREAMING_SNAKE_CASE : Optional[int] = {"visual_embedding_dim": 2048} elif "nlvr" in checkpoint_path: SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 1024} else: raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' ) else: if "vcr" in checkpoint_path: SCREAMING_SNAKE_CASE : Optional[Any] = {"visual_embedding_dim": 512} SCREAMING_SNAKE_CASE : Union[str, Any] = "multichoice" elif "vqa_advanced" in checkpoint_path: SCREAMING_SNAKE_CASE : int = {"visual_embedding_dim": 2048} SCREAMING_SNAKE_CASE : Any = "vqa_advanced" elif "vqa" in checkpoint_path: SCREAMING_SNAKE_CASE : Any = {"visual_embedding_dim": 2048, "num_labels": 3129} SCREAMING_SNAKE_CASE : Tuple = "vqa" elif "nlvr" in checkpoint_path: SCREAMING_SNAKE_CASE : int = { "visual_embedding_dim": 1024, "num_labels": 2, } SCREAMING_SNAKE_CASE : Union[str, Any] = "nlvr" SCREAMING_SNAKE_CASE : List[Any] = VisualBertConfig(**lowercase ) # Load State Dict SCREAMING_SNAKE_CASE : Union[str, Any] = load_state_dict(lowercase ) SCREAMING_SNAKE_CASE : Union[str, Any] = get_new_dict(lowercase , lowercase ) if model_type == "pretraining": 
SCREAMING_SNAKE_CASE : Union[str, Any] = VisualBertForPreTraining(lowercase ) elif model_type == "vqa": SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForQuestionAnswering(lowercase ) elif model_type == "nlvr": SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForVisualReasoning(lowercase ) elif model_type == "multichoice": SCREAMING_SNAKE_CASE : List[Any] = VisualBertForMultipleChoice(lowercase ) model.load_state_dict(lowercase ) # Save Checkpoints Path(lowercase ).mkdir(exist_ok=lowercase ) model.save_pretrained(lowercase ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""") parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""") snake_case = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
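Assuming an original checkpoint file is available locally, a direct call to the conversion entry point named in the script would look like this (both paths are hypothetical):

# Hypothetical paths; the file name must be one of ACCEPTABLE_CHECKPOINTS.
convert_visual_bert_checkpoint("vqa_fine_tuned.th", "./visual_bert_vqa")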
319
0
from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case = logging.get_logger(__name__) snake_case = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : List[Any] = '''openai-gpt''' UpperCamelCase_ : Any = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : List[str] , UpperCAmelCase_ : int=4_0478 , UpperCAmelCase_ : Dict=512 , UpperCAmelCase_ : int=768 , UpperCAmelCase_ : Dict=12 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Optional[Any]=1E-5 , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : Tuple="cls_index" , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=0.1 , **UpperCAmelCase_ : List[str] , ): SCREAMING_SNAKE_CASE : Tuple = vocab_size SCREAMING_SNAKE_CASE : str = n_positions SCREAMING_SNAKE_CASE : str = n_embd SCREAMING_SNAKE_CASE : Any = n_layer SCREAMING_SNAKE_CASE : Tuple = n_head SCREAMING_SNAKE_CASE : Dict = afn SCREAMING_SNAKE_CASE : Optional[Any] = resid_pdrop SCREAMING_SNAKE_CASE : int = embd_pdrop SCREAMING_SNAKE_CASE : int = attn_pdrop SCREAMING_SNAKE_CASE : Dict = layer_norm_epsilon SCREAMING_SNAKE_CASE : str = initializer_range SCREAMING_SNAKE_CASE : Tuple = summary_type SCREAMING_SNAKE_CASE : Any = summary_use_proj SCREAMING_SNAKE_CASE : Tuple = summary_activation SCREAMING_SNAKE_CASE : List[Any] = summary_first_dropout SCREAMING_SNAKE_CASE : List[str] = summary_proj_to_labels super().__init__(**__UpperCAmelCase )
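A short usage sketch for this configuration class, following standard transformers conventions; the dimensions shown mirror the defaults above:

from transformers import OpenAIGPTConfig, OpenAIGPTModel

config = OpenAIGPTConfig(n_embd=768, n_layer=12, n_head=12)
model = OpenAIGPTModel(config)  # randomly initialized at these sizes
print(config.hidden_size)  # 768, resolved through the attribute_map alias for n_embd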
354
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Dict = '''ClapFeatureExtractor''' UpperCamelCase_ : Any = ('''RobertaTokenizer''', '''RobertaTokenizerFast''') def __init__( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple ): super().__init__(UpperCAmelCase_ , UpperCAmelCase_ ) def __call__( self : Optional[Any] , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[str]=None , **UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : Tuple = kwargs.pop("sampling_rate" , UpperCAmelCase_ ) if text is None and audios is None: raise ValueError("You have to specify either text or audios. Both cannot be none." ) if text is not None: SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ ) if audios is not None: SCREAMING_SNAKE_CASE : Optional[int] = self.feature_extractor( UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ ) if text is not None and audios is not None: SCREAMING_SNAKE_CASE : Optional[Any] = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase_ ) , tensor_type=UpperCAmelCase_ ) def _A ( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : str ): return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) def _A ( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Any ): return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) @property def _A ( self : str ): SCREAMING_SNAKE_CASE : Any = self.tokenizer.model_input_names SCREAMING_SNAKE_CASE : List[Any] = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
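A minimal usage sketch for this processor; "laion/clap-htsat-unfused" is a public CLAP checkpoint whose feature extractor expects 48 kHz audio, and the audio here is random noise for illustration:

import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
audio = np.random.randn(48_000).astype(np.float32)  # one second of fake audio
inputs = processor(
    text=["a dog barking"], audios=audio, sampling_rate=48_000, return_tensors="pt"
)
print(list(inputs.keys()))  # input_ids, attention_mask, input_features, ...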
319
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) snake_case = { '''configuration_convnext''': ['''CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvNextConfig''', '''ConvNextOnnxConfig'''] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = ['''ConvNextFeatureExtractor'''] snake_case = ['''ConvNextImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ '''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ConvNextForImageClassification''', '''ConvNextModel''', '''ConvNextPreTrainedModel''', '''ConvNextBackbone''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ '''TFConvNextForImageClassification''', '''TFConvNextModel''', '''TFConvNextPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
355
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" assert isinstance(lowercase , lowercase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = tmp_path / "cache" SCREAMING_SNAKE_CASE : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = tmp_path / "cache" SCREAMING_SNAKE_CASE : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : Any = features.copy() if features else default_expected_features SCREAMING_SNAKE_CASE : Optional[int] = ( Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache" SCREAMING_SNAKE_CASE : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" if issubclass(lowercase , lowercase ): SCREAMING_SNAKE_CASE : Optional[Any] = parquet_path elif issubclass(lowercase , lowercase ): SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path] SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache" SCREAMING_SNAKE_CASE : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) def lowerCamelCase__ ( lowercase , lowercase , lowercase=("train",) ): """simple docstring""" assert isinstance(lowercase , lowercase ) for split in splits: SCREAMING_SNAKE_CASE : Optional[int] = dataset_dict[split] 
assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : str = tmp_path / "cache" SCREAMING_SNAKE_CASE : Dict = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): SCREAMING_SNAKE_CASE : str = ParquetDatasetReader( {"train": parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache" SCREAMING_SNAKE_CASE : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : Dict = features.copy() if features else default_expected_features SCREAMING_SNAKE_CASE : str = ( Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader({"train": parquet_path} , features=lowercase , cache_dir=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" if split: SCREAMING_SNAKE_CASE : Any = {split: parquet_path} else: SCREAMING_SNAKE_CASE : Tuple = "train" SCREAMING_SNAKE_CASE : int = {"train": parquet_path, "test": parquet_path} SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache" SCREAMING_SNAKE_CASE : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"} SCREAMING_SNAKE_CASE : int = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" ) assert writer.write() > 0 SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / "foo.parquet" ) SCREAMING_SNAKE_CASE : List[Any] = pf.read() assert dataset.data.table == output_table def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : int = str(shared_datadir / "test_image_rgb.jpg" ) SCREAMING_SNAKE_CASE : Union[str, Any] = {"image": [image_path]} SCREAMING_SNAKE_CASE : Union[str, Any] = Features({"image": Image()} ) SCREAMING_SNAKE_CASE : int = Dataset.from_dict(lowercase , features=lowercase ) SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" ) assert writer.write() > 0 SCREAMING_SNAKE_CASE : str = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) ) assert dataset.features == reloaded_dataset.features SCREAMING_SNAKE_CASE : Any = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=lowercase ).read() assert 
dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( "feature, expected" , [ (Features({"foo": Value("int32" )} ), None), (Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" assert get_writer_batch_size(lowercase ) == expected
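The reader/writer classes exercised above back the public parquet helpers on `Dataset`; a small round-trip sketch using that public API (the file path is illustrative):

from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
ds.to_parquet("tmp_example.parquet")  # backed by ParquetDatasetWriter
reloaded = Dataset.from_parquet("tmp_example.parquet")  # backed by ParquetDatasetReader
assert reloaded.column_names == ["col_1", "col_2"]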
319
0
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) snake_case = logging.get_logger(__name__) snake_case = OrderedDict( [ ("""align""", """EfficientNetImageProcessor"""), ("""beit""", """BeitImageProcessor"""), ("""bit""", """BitImageProcessor"""), ("""blip""", """BlipImageProcessor"""), ("""blip-2""", """BlipImageProcessor"""), ("""bridgetower""", """BridgeTowerImageProcessor"""), ("""chinese_clip""", """ChineseCLIPImageProcessor"""), ("""clip""", """CLIPImageProcessor"""), ("""clipseg""", """ViTImageProcessor"""), ("""conditional_detr""", """ConditionalDetrImageProcessor"""), ("""convnext""", """ConvNextImageProcessor"""), ("""convnextv2""", """ConvNextImageProcessor"""), ("""cvt""", """ConvNextImageProcessor"""), ("""data2vec-vision""", """BeitImageProcessor"""), ("""deformable_detr""", """DeformableDetrImageProcessor"""), ("""deit""", """DeiTImageProcessor"""), ("""deta""", """DetaImageProcessor"""), ("""detr""", """DetrImageProcessor"""), ("""dinat""", """ViTImageProcessor"""), ("""donut-swin""", """DonutImageProcessor"""), ("""dpt""", """DPTImageProcessor"""), ("""efficientformer""", """EfficientFormerImageProcessor"""), ("""efficientnet""", """EfficientNetImageProcessor"""), ("""flava""", """FlavaImageProcessor"""), ("""focalnet""", """BitImageProcessor"""), ("""git""", """CLIPImageProcessor"""), ("""glpn""", """GLPNImageProcessor"""), ("""groupvit""", """CLIPImageProcessor"""), ("""imagegpt""", """ImageGPTImageProcessor"""), ("""instructblip""", """BlipImageProcessor"""), ("""layoutlmv2""", """LayoutLMv2ImageProcessor"""), ("""layoutlmv3""", """LayoutLMv3ImageProcessor"""), ("""levit""", """LevitImageProcessor"""), ("""mask2former""", """Mask2FormerImageProcessor"""), ("""maskformer""", """MaskFormerImageProcessor"""), ("""mgp-str""", """ViTImageProcessor"""), ("""mobilenet_v1""", """MobileNetV1ImageProcessor"""), ("""mobilenet_v2""", """MobileNetV2ImageProcessor"""), ("""mobilevit""", """MobileViTImageProcessor"""), ("""mobilevit""", """MobileViTImageProcessor"""), ("""mobilevitv2""", """MobileViTImageProcessor"""), ("""nat""", """ViTImageProcessor"""), ("""oneformer""", """OneFormerImageProcessor"""), ("""owlvit""", """OwlViTImageProcessor"""), ("""perceiver""", """PerceiverImageProcessor"""), ("""pix2struct""", """Pix2StructImageProcessor"""), ("""poolformer""", """PoolFormerImageProcessor"""), ("""regnet""", """ConvNextImageProcessor"""), ("""resnet""", """ConvNextImageProcessor"""), ("""sam""", """SamImageProcessor"""), ("""segformer""", """SegformerImageProcessor"""), ("""swiftformer""", """ViTImageProcessor"""), ("""swin""", """ViTImageProcessor"""), ("""swin2sr""", """Swin2SRImageProcessor"""), ("""swinv2""", """ViTImageProcessor"""), ("""table-transformer""", """DetrImageProcessor"""), ("""timesformer""", """VideoMAEImageProcessor"""), ("""tvlt""", """TvltImageProcessor"""), ("""upernet""", """SegformerImageProcessor"""), ("""van""", """ConvNextImageProcessor"""), ("""videomae""", """VideoMAEImageProcessor"""), 
("""vilt""", """ViltImageProcessor"""), ("""vit""", """ViTImageProcessor"""), ("""vit_hybrid""", """ViTHybridImageProcessor"""), ("""vit_mae""", """ViTImageProcessor"""), ("""vit_msn""", """ViTImageProcessor"""), ("""xclip""", """CLIPImageProcessor"""), ("""yolos""", """YolosImageProcessor"""), ] ) snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def lowerCamelCase__ ( lowercase ): """simple docstring""" for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items(): if class_name in extractors: SCREAMING_SNAKE_CASE : List[str] = model_type_to_module_name(lowercase__ ) SCREAMING_SNAKE_CASE : List[str] = importlib.import_module(F'''.{module_name}''' , "transformers.models" ) try: return getattr(lowercase__ , lowercase__ ) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(lowercase__ , "__name__" , lowercase__ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. SCREAMING_SNAKE_CASE : List[str] = importlib.import_module("transformers" ) if hasattr(lowercase__ , lowercase__ ): return getattr(lowercase__ , lowercase__ ) return None def lowerCamelCase__ ( lowercase , lowercase = None , lowercase = False , lowercase = False , lowercase = None , lowercase = None , lowercase = None , lowercase = False , **lowercase , ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = get_file_from_repo( lowercase__ , lowercase__ , cache_dir=lowercase__ , force_download=lowercase__ , resume_download=lowercase__ , proxies=lowercase__ , use_auth_token=lowercase__ , revision=lowercase__ , local_files_only=lowercase__ , ) if resolved_config_file is None: logger.info( "Could not locate the image processor configuration file, will try to use the model config instead." ) return {} with open(lowercase__ , encoding="utf-8" ) as reader: return json.load(lowercase__ ) class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : List[str] ): raise EnvironmentError( "AutoImageProcessor is designed to be instantiated " "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." ) @classmethod @replace_list_option_in_docstrings(UpperCAmelCase_ ) def _A ( cls : int , UpperCAmelCase_ : str , **UpperCAmelCase_ : List[str] ): SCREAMING_SNAKE_CASE : int = kwargs.pop("config" , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = kwargs.pop("trust_remote_code" , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = True SCREAMING_SNAKE_CASE : Optional[Any] = ImageProcessingMixin.get_image_processor_dict(UpperCAmelCase_ , **UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Dict = config_dict.get("image_processor_type" , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = None if "AutoImageProcessor" in config_dict.get("auto_map" , {} ): SCREAMING_SNAKE_CASE : List[str] = config_dict["""auto_map"""]["""AutoImageProcessor"""] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. if image_processor_class is None and image_processor_auto_map is None: SCREAMING_SNAKE_CASE : Optional[int] = config_dict.pop("feature_extractor_type" , UpperCAmelCase_ ) if feature_extractor_class is not None: logger.warning( "Could not find image processor class in the image processor config or the model config. 
Loading" " based on pattern matching with the model's feature extractor configuration." ) SCREAMING_SNAKE_CASE : Union[str, Any] = feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor" ) if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ): SCREAMING_SNAKE_CASE : int = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] SCREAMING_SNAKE_CASE : Tuple = feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor" ) logger.warning( "Could not find image processor auto map in the image processor config or the model config." " Loading based on pattern matching with the model's feature extractor configuration." ) # If we don't find the image processor class in the image processor config, let's try the model config. if image_processor_class is None and image_processor_auto_map is None: if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) # It could be in `config.image_processor_type`` SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(UpperCAmelCase_ , "image_processor_type" , UpperCAmelCase_ ) if hasattr(UpperCAmelCase_ , "auto_map" ) and "AutoImageProcessor" in config.auto_map: SCREAMING_SNAKE_CASE : List[str] = config.auto_map["""AutoImageProcessor"""] if image_processor_class is not None: SCREAMING_SNAKE_CASE : int = image_processor_class_from_name(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = image_processor_auto_map is not None SCREAMING_SNAKE_CASE : Tuple = image_processor_class is not None or type(UpperCAmelCase_ ) in IMAGE_PROCESSOR_MAPPING SCREAMING_SNAKE_CASE : Union[str, Any] = resolve_trust_remote_code( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) if has_remote_code and trust_remote_code: SCREAMING_SNAKE_CASE : Tuple = get_class_from_dynamic_module( UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = kwargs.pop("code_revision" , UpperCAmelCase_ ) if os.path.isdir(UpperCAmelCase_ ): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ ) elif image_processor_class is not None: return image_processor_class.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ ) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(UpperCAmelCase_ ) in IMAGE_PROCESSOR_MAPPING: SCREAMING_SNAKE_CASE : Union[str, Any] = IMAGE_PROCESSOR_MAPPING[type(UpperCAmelCase_ )] return image_processor_class.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ ) raise ValueError( f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a ''' f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following ''' f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def _A ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] ): IMAGE_PROCESSOR_MAPPING.register(UpperCAmelCase_ , UpperCAmelCase_ )
356
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available snake_case = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """FocalNetForImageClassification""", """FocalNetForMaskedImageModeling""", """FocalNetBackbone""", """FocalNetModel""", """FocalNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
319
0