code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
"""simple docstring""" import argparse import collections import json import os import re import string import sys import numpy as np __A : str = re.compile(r"\b(a|an|the)\b", re.UNICODE) __A : List[str] = None def lowercase ( ): '''simple docstring''' _UpperCAmelCase = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' ) parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' ) parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' ) parser.add_argument( '''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' ) parser.add_argument( '''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' ) parser.add_argument( '''--na-prob-thresh''' , '''-t''' , type=_SCREAMING_SNAKE_CASE , default=1.0 , help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' , ) parser.add_argument( '''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=_SCREAMING_SNAKE_CASE , help='''Save precision-recall curves to directory.''' ) parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' _UpperCAmelCase = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: _UpperCAmelCase = bool(qa['''answers''']['''text'''] ) return qid_to_has_ans def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' def remove_articles(_SCREAMING_SNAKE_CASE : Dict ): return ARTICLES_REGEX.sub(''' ''' , _SCREAMING_SNAKE_CASE ) def white_space_fix(_SCREAMING_SNAKE_CASE : List[Any] ): return " ".join(text.split() ) def remove_punc(_SCREAMING_SNAKE_CASE : Union[str, Any] ): _UpperCAmelCase = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_SCREAMING_SNAKE_CASE : Optional[Any] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_SCREAMING_SNAKE_CASE ) ) ) ) def lowercase ( _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' if not s: return [] return normalize_answer(_SCREAMING_SNAKE_CASE ).split() def lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' return int(normalize_answer(_SCREAMING_SNAKE_CASE ) == normalize_answer(_SCREAMING_SNAKE_CASE ) ) def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' _UpperCAmelCase = get_tokens(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = get_tokens(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = collections.Counter(_SCREAMING_SNAKE_CASE ) & collections.Counter(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = sum(common.values() ) if len(_SCREAMING_SNAKE_CASE ) == 0 or len(_SCREAMING_SNAKE_CASE ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 _UpperCAmelCase = 1.0 * num_same / len(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = 1.0 * num_same / len(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = (2 * precision * recall) / (precision + recall) return fa def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' _UpperCAmelCase = {} _UpperCAmelCase = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: _UpperCAmelCase = qa['''id'''] _UpperCAmelCase = [t for t in qa['''answers''']['''text'''] if normalize_answer(_SCREAMING_SNAKE_CASE )] if not gold_answers: # For unanswerable questions, only correct answer is empty string _UpperCAmelCase = [''''''] if qid not in preds: print(f'Missing prediction for {qid}' ) continue _UpperCAmelCase = preds[qid] # Take max over all gold answers _UpperCAmelCase = max(compute_exact(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for a in gold_answers ) _UpperCAmelCase = max(compute_fa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for a in gold_answers ) return exact_scores, fa_scores def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' _UpperCAmelCase = {} for qid, s in scores.items(): _UpperCAmelCase = na_probs[qid] > na_prob_thresh if pred_na: _UpperCAmelCase = float(not qid_to_has_ans[qid] ) else: _UpperCAmelCase = s return new_scores def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any]=None ): '''simple docstring''' if not qid_list: _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) return collections.OrderedDict( [ ('''exact''', 100.0 * sum(exact_scores.values() ) / total), ('''f1''', 100.0 * sum(fa_scores.values() ) / total), ('''total''', total), ] ) else: _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) return collections.OrderedDict( [ ('''exact''', 100.0 * sum(exact_scores[k] for k in qid_list ) / total), ('''f1''', 100.0 * sum(fa_scores[k] for k in qid_list ) / total), ('''total''', total), ] ) def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' for k in new_eval: _UpperCAmelCase = new_eval[k] def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' plt.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , color='''b''' , alpha=0.2 , where='''post''' ) plt.fill_between(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , step='''post''' , alpha=0.2 , color='''b''' ) plt.xlabel('''Recall''' ) plt.ylabel('''Precision''' ) plt.xlim([0.0, 1.05] ) plt.ylim([0.0, 1.05] ) plt.title(_SCREAMING_SNAKE_CASE ) plt.savefig(_SCREAMING_SNAKE_CASE ) plt.clf() def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any]=None , _SCREAMING_SNAKE_CASE : Optional[int]=None ): '''simple docstring''' _UpperCAmelCase = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : na_probs[k] ) _UpperCAmelCase = 0.0 _UpperCAmelCase = 1.0 _UpperCAmelCase = 0.0 _UpperCAmelCase = [1.0] _UpperCAmelCase = [0.0] _UpperCAmelCase = 0.0 for i, qid in enumerate(_SCREAMING_SNAKE_CASE ): if qid_to_has_ans[qid]: true_pos += scores[qid] _UpperCAmelCase = true_pos / float(i + 1 ) _UpperCAmelCase = true_pos / float(_SCREAMING_SNAKE_CASE ) if i == len(_SCREAMING_SNAKE_CASE ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(_SCREAMING_SNAKE_CASE ) recalls.append(_SCREAMING_SNAKE_CASE ) if out_image: plot_pr_curve(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return {"ap": 100.0 * avg_prec} def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' if out_image_dir and not os.path.exists(_SCREAMING_SNAKE_CASE ): os.makedirs(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return _UpperCAmelCase = make_precision_recall_eval( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , out_image=os.path.join(_SCREAMING_SNAKE_CASE , '''pr_exact.png''' ) , title='''Precision-Recall curve for Exact Match score''' , ) _UpperCAmelCase = make_precision_recall_eval( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , out_image=os.path.join(_SCREAMING_SNAKE_CASE , '''pr_f1.png''' ) , title='''Precision-Recall curve for F1 score''' , ) _UpperCAmelCase = {k: float(_SCREAMING_SNAKE_CASE ) for k, v in qid_to_has_ans.items()} _UpperCAmelCase = make_precision_recall_eval( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , out_image=os.path.join(_SCREAMING_SNAKE_CASE , '''pr_oracle.png''' ) , title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' , ) merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''pr_exact''' ) merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''pr_f1''' ) merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''pr_oracle''' ) def lowercase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' if not qid_list: return _UpperCAmelCase = [na_probs[k] for k in qid_list] _UpperCAmelCase = np.ones_like(_SCREAMING_SNAKE_CASE ) / float(len(_SCREAMING_SNAKE_CASE ) ) plt.hist(_SCREAMING_SNAKE_CASE , weights=_SCREAMING_SNAKE_CASE , bins=20 , range=(0.0, 1.0) ) plt.xlabel('''Model probability of no-answer''' ) plt.ylabel('''Proportion of dataset''' ) plt.title(f'Histogram of no-answer probability: {name}' ) plt.savefig(os.path.join(_SCREAMING_SNAKE_CASE , f'na_prob_hist_{name}.png' ) ) plt.clf() def lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' _UpperCAmelCase = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) _UpperCAmelCase = num_no_ans _UpperCAmelCase = cur_score _UpperCAmelCase = 0.0 _UpperCAmelCase = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : na_probs[k] ) for i, qid in enumerate(_SCREAMING_SNAKE_CASE ): if qid not in scores: continue if qid_to_has_ans[qid]: _UpperCAmelCase = scores[qid] else: if preds[qid]: _UpperCAmelCase = -1 else: _UpperCAmelCase = 0 cur_score += diff if cur_score > best_score: _UpperCAmelCase = cur_score _UpperCAmelCase = na_probs[qid] return 100.0 * best_score / len(_SCREAMING_SNAKE_CASE ), best_thresh def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = find_best_thresh(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase , _UpperCAmelCase = find_best_thresh(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = best_exact _UpperCAmelCase = exact_thresh _UpperCAmelCase = best_fa _UpperCAmelCase = fa_thresh def lowercase ( ): '''simple docstring''' with open(OPTS.data_file ) as f: _UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = dataset_json['''data'''] with open(OPTS.pred_file ) as f: _UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: _UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = {k: 0.0 for k in preds} _UpperCAmelCase = make_qid_to_has_ans(_SCREAMING_SNAKE_CASE ) # maps qid to True/False _UpperCAmelCase = [k for k, v in qid_to_has_ans.items() if v] _UpperCAmelCase = [k for k, v in qid_to_has_ans.items() if not v] _UpperCAmelCase , _UpperCAmelCase = get_raw_scores(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = apply_no_ans_threshold(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.na_prob_thresh ) _UpperCAmelCase = apply_no_ans_threshold(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.na_prob_thresh ) _UpperCAmelCase = make_eval_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if has_ans_qids: _UpperCAmelCase = make_eval_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , qid_list=_SCREAMING_SNAKE_CASE ) merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''HasAns''' ) if no_ans_qids: _UpperCAmelCase = make_eval_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , qid_list=_SCREAMING_SNAKE_CASE ) merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''NoAns''' ) if OPTS.na_prob_file: find_all_best_thresh(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.out_image_dir ) histogram_na_prob(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.out_image_dir , '''hasAns''' ) histogram_na_prob(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.out_image_dir , '''noAns''' ) if OPTS.out_file: with open(OPTS.out_file , '''w''' ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: print(json.dumps(_SCREAMING_SNAKE_CASE , indent=2 ) ) if __name__ == "__main__": __A : Dict = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use("Agg") import matplotlib.pyplot as plt main()
260
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) while cur > 1: # Find the maximum number in arr _UpperCAmelCase = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi _UpperCAmelCase = arr[mi::-1] + arr[mi + 1 : len(_SCREAMING_SNAKE_CASE )] # Reverse whole list _UpperCAmelCase = arr[cur - 1 :: -1] + arr[cur : len(_SCREAMING_SNAKE_CASE )] cur -= 1 return arr if __name__ == "__main__": __A : List[str] = input("Enter numbers separated by a comma:\n").strip() __A : List[Any] = [int(item) for item in user_input.split(",")] print(pancake_sort(unsorted))
260
1
"""simple docstring""" import cmath import math def lowercase ( _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ): '''simple docstring''' _UpperCAmelCase = math.radians(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = math.radians(_SCREAMING_SNAKE_CASE ) # Convert voltage and current to rectangular form _UpperCAmelCase = cmath.rect(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = cmath.rect(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Calculate apparent power return voltage_rect * current_rect if __name__ == "__main__": import doctest doctest.testmod()
260
"""simple docstring""" from pathlib import Path import numpy as np from PIL import Image def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2989 * r + 0.5870 * g + 0.1140 * b def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' return (gray > 127) & (gray <= 255) def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' _UpperCAmelCase = np.zeros_like(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image _UpperCAmelCase = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): _UpperCAmelCase = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() _UpperCAmelCase = int(summation > 0 ) return output if __name__ == "__main__": # read original image __A : str = Path(__file__).resolve().parent / "image_data" / "lena.jpg" __A : str = np.array(Image.open(lena_path)) # kernel to be applied __A : List[Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) __A : Optional[Any] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image __A : Optional[Any] = Image.fromarray(output).convert("RGB") pil_img.save("result_dilation.png")
260
1
"""simple docstring""" from ....utils import logging __A : Union[str, Any] = logging.get_logger(__name__) class _a ( lowerCAmelCase): """simple docstring""" def __init__( self : Optional[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Any=2_0_4_8 )->List[Any]: _UpperCAmelCase = config.__dict__ _UpperCAmelCase = modal_hidden_size if num_labels: _UpperCAmelCase = num_labels
260
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A : Tuple = logging.get_logger(__name__) __A : Optional[Any] = { "MIT/ast-finetuned-audioset-10-10-0.4593": ( "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json" ), } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """audio-spectrogram-transformer""" def __init__( self : int , __UpperCamelCase : Optional[Any]=7_6_8 , __UpperCamelCase : int=1_2 , __UpperCamelCase : List[Any]=1_2 , __UpperCamelCase : List[Any]=3_0_7_2 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : Union[str, Any]=0.0 , __UpperCamelCase : Dict=0.0 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Union[str, Any]=1e-12 , __UpperCamelCase : Optional[Any]=1_6 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : int=1_0 , __UpperCamelCase : Optional[int]=1_0 , __UpperCamelCase : str=1_0_2_4 , __UpperCamelCase : Optional[Any]=1_2_8 , **__UpperCamelCase : Any , )->Tuple: super().__init__(**__UpperCamelCase ) _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = patch_size _UpperCAmelCase = qkv_bias _UpperCAmelCase = frequency_stride _UpperCAmelCase = time_stride _UpperCAmelCase = max_length _UpperCAmelCase = num_mel_bins
260
1
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __A : str = sys.version_info >= (3, 10) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Tuple=None ): '''simple docstring''' return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = field(default="""toto""" , metadata={"""help""": """help message"""}) @dataclass class _a : """simple docstring""" UpperCamelCase__ = False UpperCamelCase__ = True UpperCamelCase__ = None class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """titi""" UpperCamelCase__ = """toto""" class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """titi""" UpperCamelCase__ = """toto""" UpperCamelCase__ = 42 @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" def lowercase__ ( self : Tuple )->Optional[int]: _UpperCAmelCase = BasicEnum(self.foo ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" def lowercase__ ( self : List[str] )->List[Any]: _UpperCAmelCase = MixedTypeEnum(self.foo ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = None UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """help message"""}) UpperCamelCase__ = None UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[]) @dataclass class _a : """simple docstring""" UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[1, 2, 3]) UpperCamelCase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) UpperCamelCase__ = list_field(default=[0.1, 0.2, 0.3]) @dataclass class _a : """simple docstring""" UpperCamelCase__ = field() UpperCamelCase__ = field() UpperCamelCase__ = field() def lowercase__ ( self : int )->str: _UpperCAmelCase = BasicEnum(self.required_enum ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = field() UpperCamelCase__ = None UpperCamelCase__ = field(default="""toto""" , metadata={"""help""": """help message"""}) UpperCamelCase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) if is_python_no_less_than_3_10: @dataclass class _a : """simple docstring""" UpperCamelCase__ = False UpperCamelCase__ = True UpperCamelCase__ = None @dataclass class _a : """simple docstring""" UpperCamelCase__ = None UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """help message"""}) UpperCamelCase__ = None UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[]) class _a ( unittest.TestCase): """simple docstring""" def lowercase__ ( self : int , __UpperCamelCase : argparse.ArgumentParser , __UpperCamelCase : argparse.ArgumentParser )->Dict: self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): _UpperCAmelCase = {k: v for k, v in vars(__UpperCamelCase ).items() if k != '''container'''} _UpperCAmelCase = {k: v for k, v in vars(__UpperCamelCase ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , __UpperCamelCase ) and yy.get('''choices''' , __UpperCamelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](__UpperCamelCase ) , yy['''type'''](__UpperCamelCase ) ) del xx["type"], yy["type"] self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : int )->str: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--bar''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--baz''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--flag''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((_UpperCAmelCase) , ) = parser.parse_args_into_dataclasses(__UpperCamelCase , look_for_args_file=__UpperCamelCase ) self.assertFalse(example.flag ) def lowercase__ ( self : Dict )->List[Any]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=4_2 , type=__UpperCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCamelCase , help='''help message''' ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Tuple )->List[str]: _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) expected.add_argument('''--baz''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=__UpperCamelCase , dest='''baz''' ) expected.add_argument('''--opt''' , type=__UpperCamelCase , default=__UpperCamelCase ) _UpperCAmelCase = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(__UpperCamelCase ) for dataclass_type in dataclass_types: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) def lowercase__ ( self : Optional[Any] )->str: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 4_2] , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) _UpperCAmelCase = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) _UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 4_2 ) _UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowercase__ ( self : List[str] )->List[str]: @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 4_2) , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 4_2 ) def lowercase__ ( self : int )->int: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=__UpperCamelCase ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCamelCase ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual( __UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) _UpperCAmelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(__UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def lowercase__ ( self : Union[str, Any] )->Tuple: _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=__UpperCamelCase , type=__UpperCamelCase ) expected.add_argument('''--bar''' , default=__UpperCamelCase , type=__UpperCamelCase , help='''help message''' ) expected.add_argument('''--baz''' , default=__UpperCamelCase , type=__UpperCamelCase ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) _UpperCAmelCase = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(__UpperCamelCase ) for dataclass_type in dataclass_types: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , bar=__UpperCamelCase , baz=__UpperCamelCase , ces=[] , des=[] ) ) _UpperCAmelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(__UpperCamelCase , Namespace(foo=1_2 , bar=3.1_4 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def lowercase__ ( self : Any )->int: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--required_str''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCamelCase , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : str )->List[Any]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCamelCase , ) expected.add_argument('''--opt''' , type=__UpperCamelCase , default=__UpperCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCamelCase , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Optional[Any] )->Optional[int]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } _UpperCAmelCase = parser.parse_dict(__UpperCamelCase )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->List[str]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, '''extra''': 4_2, } self.assertRaises(__UpperCamelCase , parser.parse_dict , __UpperCamelCase , allow_extra_keys=__UpperCamelCase ) def lowercase__ ( self : Optional[Any] )->Optional[int]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(__UpperCamelCase , '''temp_json''' ) os.mkdir(__UpperCamelCase ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->Any: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(__UpperCamelCase , '''temp_yaml''' ) os.mkdir(__UpperCamelCase ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : int )->List[str]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase )
260
"""simple docstring""" def lowercase ( ): '''simple docstring''' _UpperCAmelCase = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] _UpperCAmelCase = 6 _UpperCAmelCase = 1 _UpperCAmelCase = 1901 _UpperCAmelCase = 0 while year < 2001: day += 7 if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0): if day > days_per_month[month - 1] and month != 2: month += 1 _UpperCAmelCase = day - days_per_month[month - 2] elif day > 29 and month == 2: month += 1 _UpperCAmelCase = day - 29 else: if day > days_per_month[month - 1]: month += 1 _UpperCAmelCase = day - days_per_month[month - 2] if month > 12: year += 1 _UpperCAmelCase = 1 if year < 2001 and day == 1: sundays += 1 return sundays if __name__ == "__main__": print(solution())
260
1
"""simple docstring""" import os def lowercase ( ): '''simple docstring''' _UpperCAmelCase = os.path.join(os.path.dirname(_SCREAMING_SNAKE_CASE ) , '''num.txt''' ) with open(_SCREAMING_SNAKE_CASE ) as file_hand: return str(sum(int(_SCREAMING_SNAKE_CASE ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
260
"""simple docstring""" from __future__ import annotations import math def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = str(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [n] for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ): list_nums.append(int(str_num[i:] ) ) list_nums.append(int(str_num[:-i] ) ) return list_nums def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if len(str(_SCREAMING_SNAKE_CASE ) ) > 3: if not is_prime(int(str(_SCREAMING_SNAKE_CASE )[-3:] ) ) or not is_prime(int(str(_SCREAMING_SNAKE_CASE )[:3] ) ): return False return True def lowercase ( _SCREAMING_SNAKE_CASE : int = 11 ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = 13 while len(_SCREAMING_SNAKE_CASE ) != count: if validate(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = list_truncated_nums(_SCREAMING_SNAKE_CASE ) if all(is_prime(_SCREAMING_SNAKE_CASE ) for i in list_nums ): list_truncated_primes.append(_SCREAMING_SNAKE_CASE ) num += 2 return list_truncated_primes def lowercase ( ): '''simple docstring''' return sum(compute_truncated_primes(11 ) ) if __name__ == "__main__": print(f'''{sum(compute_truncated_primes(11)) = }''')
260
1
"""simple docstring""" import darl # noqa import gym import tqdm from diffusers.experimental import ValueGuidedRLPipeline __A : Tuple = { "n_samples": 64, "horizon": 32, "num_inference_steps": 20, "n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network "scale_grad_by_std": True, "scale": 0.1, "eta": 0.0, "t_grad_cutoff": 2, "device": "cpu", } if __name__ == "__main__": __A : Optional[Any] = "hopper-medium-v2" __A : Optional[Any] = gym.make(env_name) __A : Union[str, Any] = ValueGuidedRLPipeline.from_pretrained( "bglick13/hopper-medium-v2-value-function-hor32", env=env, ) env.seed(0) __A : Dict = env.reset() __A : List[str] = 0 __A : Tuple = 0 __A : Tuple = 1000 __A : Tuple = [obs.copy()] try: for t in tqdm.tqdm(range(T)): # call the policy __A : Dict = pipeline(obs, planning_horizon=32) # execute action in environment __A , __A , __A , __A : int = env.step(denorm_actions) __A : int = env.get_normalized_score(total_reward) # update return total_reward += reward total_score += score print( f'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:''' f''' {total_score}''' ) # save observations for rendering rollout.append(next_observation.copy()) __A : List[Any] = next_observation except KeyboardInterrupt: pass print(f'''Total reward: {total_reward}''')
260
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __A : str = sys.version_info >= (3, 10) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Tuple=None ): '''simple docstring''' return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = field(default="""toto""" , metadata={"""help""": """help message"""}) @dataclass class _a : """simple docstring""" UpperCamelCase__ = False UpperCamelCase__ = True UpperCamelCase__ = None class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """titi""" UpperCamelCase__ = """toto""" class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """titi""" UpperCamelCase__ = """toto""" UpperCamelCase__ = 42 @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" def lowercase__ ( self : Tuple )->Optional[int]: _UpperCAmelCase = BasicEnum(self.foo ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" def lowercase__ ( self : List[str] )->List[Any]: _UpperCAmelCase = MixedTypeEnum(self.foo ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = None UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """help message"""}) UpperCamelCase__ = None UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[]) @dataclass class _a : """simple docstring""" UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[1, 2, 3]) UpperCamelCase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) UpperCamelCase__ = list_field(default=[0.1, 0.2, 0.3]) @dataclass class _a : """simple docstring""" UpperCamelCase__ = field() UpperCamelCase__ = field() UpperCamelCase__ = field() def lowercase__ ( self : int )->str: _UpperCAmelCase = BasicEnum(self.required_enum ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = field() UpperCamelCase__ = None UpperCamelCase__ = field(default="""toto""" , metadata={"""help""": """help message"""}) UpperCamelCase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) if is_python_no_less_than_3_10: @dataclass class _a : """simple docstring""" UpperCamelCase__ = False UpperCamelCase__ = True UpperCamelCase__ = None @dataclass class _a : """simple docstring""" UpperCamelCase__ = None UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """help message"""}) UpperCamelCase__ = None UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[]) class _a ( unittest.TestCase): """simple docstring""" def lowercase__ ( self : int , __UpperCamelCase : argparse.ArgumentParser , __UpperCamelCase : argparse.ArgumentParser )->Dict: self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): _UpperCAmelCase = {k: v for k, v in vars(__UpperCamelCase ).items() if k != '''container'''} _UpperCAmelCase = {k: v for k, v in vars(__UpperCamelCase ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , __UpperCamelCase ) and yy.get('''choices''' , __UpperCamelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](__UpperCamelCase ) , yy['''type'''](__UpperCamelCase ) ) del xx["type"], yy["type"] self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : int )->str: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--bar''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--baz''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--flag''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((_UpperCAmelCase) , ) = parser.parse_args_into_dataclasses(__UpperCamelCase , look_for_args_file=__UpperCamelCase ) self.assertFalse(example.flag ) def lowercase__ ( self : Dict )->List[Any]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=4_2 , type=__UpperCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCamelCase , help='''help message''' ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Tuple )->List[str]: _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) expected.add_argument('''--baz''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=__UpperCamelCase , dest='''baz''' ) expected.add_argument('''--opt''' , type=__UpperCamelCase , default=__UpperCamelCase ) _UpperCAmelCase = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(__UpperCamelCase ) for dataclass_type in dataclass_types: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) def lowercase__ ( self : Optional[Any] )->str: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 4_2] , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) _UpperCAmelCase = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) _UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 4_2 ) _UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowercase__ ( self : List[str] )->List[str]: @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 4_2) , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 4_2 ) def lowercase__ ( self : int )->int: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=__UpperCamelCase ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCamelCase ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual( __UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) _UpperCAmelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(__UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def lowercase__ ( self : Union[str, Any] )->Tuple: _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=__UpperCamelCase , type=__UpperCamelCase ) expected.add_argument('''--bar''' , default=__UpperCamelCase , type=__UpperCamelCase , help='''help message''' ) expected.add_argument('''--baz''' , default=__UpperCamelCase , type=__UpperCamelCase ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) _UpperCAmelCase = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(__UpperCamelCase ) for dataclass_type in dataclass_types: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , bar=__UpperCamelCase , baz=__UpperCamelCase , ces=[] , des=[] ) ) _UpperCAmelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(__UpperCamelCase , Namespace(foo=1_2 , bar=3.1_4 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def lowercase__ ( self : Any )->int: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--required_str''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCamelCase , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : str )->List[Any]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCamelCase , ) expected.add_argument('''--opt''' , type=__UpperCamelCase , default=__UpperCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCamelCase , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Optional[Any] )->Optional[int]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } _UpperCAmelCase = parser.parse_dict(__UpperCamelCase )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->List[str]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, '''extra''': 4_2, } self.assertRaises(__UpperCamelCase , parser.parse_dict , __UpperCamelCase , allow_extra_keys=__UpperCamelCase ) def lowercase__ ( self : Optional[Any] )->Optional[int]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(__UpperCamelCase , '''temp_json''' ) os.mkdir(__UpperCamelCase ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->Any: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(__UpperCamelCase , '''temp_yaml''' ) os.mkdir(__UpperCamelCase ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : int )->List[str]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase )
260
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __A : Optional[int] = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Tuple = [ "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", "SwinForImageClassification", "SwinForMaskedImageModeling", "SwinModel", "SwinPreTrainedModel", "SwinBackbone", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[Any] = [ "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", "TFSwinForImageClassification", "TFSwinForMaskedImageModeling", "TFSwinModel", "TFSwinPreTrainedModel", ] if TYPE_CHECKING: from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swin import ( SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel, SwinPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel, ) else: import sys __A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
260
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] _UpperCAmelCase = True for i in range(_SCREAMING_SNAKE_CASE ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: _UpperCAmelCase = True if a[i].islower(): _UpperCAmelCase = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
260
1
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __A : Union[str, Any] = logging.get_logger(__name__) if is_vision_available(): import PIL class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = ["""pixel_values"""] def __init__( self : Union[str, Any] , __UpperCamelCase : bool = True , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __UpperCamelCase : bool = True , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : bool = True , __UpperCamelCase : Union[int, float] = 1 / 2_5_5 , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : bool = True , **__UpperCamelCase : List[Any] , )->None: super().__init__(**__UpperCamelCase ) _UpperCAmelCase = size if size is not None else {'''shortest_edge''': 2_2_4} _UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase ) _UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4} _UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase , param_name='''crop_size''' ) _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = resample _UpperCAmelCase = do_center_crop _UpperCAmelCase = crop_size _UpperCAmelCase = do_rescale _UpperCAmelCase = rescale_factor _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN _UpperCAmelCase = image_std if image_std is not None else OPENAI_CLIP_STD _UpperCAmelCase = do_convert_rgb def lowercase__ ( self : Any , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : int , )->np.ndarray: _UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) _UpperCAmelCase = get_resize_output_image_size(__UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCamelCase ) return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowercase__ ( self : Tuple , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Optional[int] , )->np.ndarray: _UpperCAmelCase = get_size_dict(__UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' ) return center_crop(__UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowercase__ ( self : Optional[Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[int, float] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : int , )->Optional[Any]: return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowercase__ ( self : Optional[Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : str , )->np.ndarray: return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : ImageInput , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = None , __UpperCamelCase : bool = None , __UpperCamelCase : int = None , __UpperCamelCase : bool = None , __UpperCamelCase : float = None , __UpperCamelCase : bool = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : bool = None , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **__UpperCamelCase : Dict , )->PIL.Image.Image: _UpperCAmelCase = do_resize if do_resize is not None else self.do_resize _UpperCAmelCase = size if size is not None else self.size _UpperCAmelCase = get_size_dict(__UpperCamelCase , param_name='''size''' , default_to_square=__UpperCamelCase ) _UpperCAmelCase = resample if resample is not None else self.resample _UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCAmelCase = crop_size if crop_size is not None else self.crop_size _UpperCAmelCase = get_size_dict(__UpperCamelCase , param_name='''crop_size''' , default_to_square=__UpperCamelCase ) _UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale _UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize _UpperCAmelCase = image_mean if image_mean is not None else self.image_mean _UpperCAmelCase = image_std if image_std is not None else self.image_std _UpperCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb _UpperCAmelCase = make_list_of_images(__UpperCamelCase ) if not valid_images(__UpperCamelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: _UpperCAmelCase = [convert_to_rgb(__UpperCamelCase ) for image in images] # All transformations expect numpy arrays. _UpperCAmelCase = [to_numpy_array(__UpperCamelCase ) for image in images] if do_resize: _UpperCAmelCase = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images] if do_center_crop: _UpperCAmelCase = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase ) for image in images] if do_rescale: _UpperCAmelCase = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images] if do_normalize: _UpperCAmelCase = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images] _UpperCAmelCase = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images] _UpperCAmelCase = {'''pixel_values''': images} return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
260
"""simple docstring""" import random def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' _UpperCAmelCase = a[left_index] _UpperCAmelCase = left_index + 1 for j in range(left_index + 1 , _SCREAMING_SNAKE_CASE ): if a[j] < pivot: _UpperCAmelCase , _UpperCAmelCase = a[i], a[j] i += 1 _UpperCAmelCase , _UpperCAmelCase = a[i - 1], a[left_index] return i - 1 def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' if left < right: _UpperCAmelCase = random.randint(_SCREAMING_SNAKE_CASE , right - 1 ) _UpperCAmelCase , _UpperCAmelCase = ( a[left], a[pivot], ) # switches the pivot with the left most bound _UpperCAmelCase = partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) quick_sort_random( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # recursive quicksort to the left of the pivot point quick_sort_random( _SCREAMING_SNAKE_CASE , pivot_index + 1 , _SCREAMING_SNAKE_CASE ) # recursive quicksort to the right of the pivot point def lowercase ( ): '''simple docstring''' _UpperCAmelCase = input('''Enter numbers separated by a comma:\n''' ).strip() _UpperCAmelCase = [int(_SCREAMING_SNAKE_CASE ) for item in user_input.split(''',''' )] quick_sort_random(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) ) print(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
260
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A : Optional[int] = logging.get_logger(__name__) __A : Optional[int] = { "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json", } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """open-llama""" def __init__( self : Any , __UpperCamelCase : int=1_0_0_0_0_0 , __UpperCamelCase : str=4_0_9_6 , __UpperCamelCase : Optional[int]=1_1_0_0_8 , __UpperCamelCase : List[str]=3_2 , __UpperCamelCase : str=3_2 , __UpperCamelCase : str="silu" , __UpperCamelCase : Optional[Any]=2_0_4_8 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : List[str]=1e-6 , __UpperCamelCase : Any=True , __UpperCamelCase : Optional[int]=0 , __UpperCamelCase : List[str]=1 , __UpperCamelCase : str=2 , __UpperCamelCase : int=False , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : Tuple=True , __UpperCamelCase : Dict=True , __UpperCamelCase : Tuple=None , **__UpperCamelCase : Tuple , )->List[Any]: _UpperCAmelCase = vocab_size _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = hidden_size _UpperCAmelCase = intermediate_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = initializer_range _UpperCAmelCase = rms_norm_eps _UpperCAmelCase = use_cache _UpperCAmelCase = kwargs.pop( '''use_memorry_efficient_attention''' , __UpperCamelCase ) _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_dropout_prob _UpperCAmelCase = use_stable_embedding _UpperCAmelCase = shared_input_output_embedding _UpperCAmelCase = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , tie_word_embeddings=__UpperCamelCase , **__UpperCamelCase , ) def lowercase__ ( self : Optional[int] )->List[Any]: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __UpperCamelCase ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ''' F'got {self.rope_scaling}' ) _UpperCAmelCase = self.rope_scaling.get('''type''' , __UpperCamelCase ) _UpperCAmelCase = self.rope_scaling.get('''factor''' , __UpperCamelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' ) if rope_scaling_factor is None or not isinstance(__UpperCamelCase , __UpperCamelCase ) or rope_scaling_factor <= 1.0: raise ValueError(F'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
260
"""simple docstring""" import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging __A : Union[str, Any] = "\\n\n" __A : Any = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n" __A : List[str] = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _a ( datasets.Metric): """simple docstring""" def lowercase__ ( self : List[Any] )->Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''input_texts''': datasets.Value('''string''' ), } ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , ) def lowercase__ ( self : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : int = 1_6 , __UpperCamelCase : bool = True , __UpperCamelCase : List[Any]=None )->Any: if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." if device == "gpu": _UpperCAmelCase = '''cuda''' else: _UpperCAmelCase = '''cuda''' if torch.cuda.is_available() else '''cpu''' _UpperCAmelCase = AutoModelForCausalLM.from_pretrained(__UpperCamelCase ) _UpperCAmelCase = model.to(__UpperCamelCase ) _UpperCAmelCase = AutoTokenizer.from_pretrained(__UpperCamelCase ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: _UpperCAmelCase = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(__UpperCamelCase ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" _UpperCAmelCase = model.config.max_length - 1 else: _UpperCAmelCase = model.config.max_length _UpperCAmelCase = tokenizer( __UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors='''pt''' , return_attention_mask=__UpperCamelCase , ).to(__UpperCamelCase ) _UpperCAmelCase = encodings['''input_ids'''] _UpperCAmelCase = encodings['''attention_mask'''] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." _UpperCAmelCase = [] _UpperCAmelCase = CrossEntropyLoss(reduction='''none''' ) for start_index in logging.tqdm(range(0 , len(__UpperCamelCase ) , __UpperCamelCase ) ): _UpperCAmelCase = min(start_index + batch_size , len(__UpperCamelCase ) ) _UpperCAmelCase = encoded_texts[start_index:end_index] _UpperCAmelCase = attn_masks[start_index:end_index] if add_start_token: _UpperCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__UpperCamelCase ) _UpperCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) _UpperCAmelCase = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(__UpperCamelCase ), attn_mask] , dim=1 ) _UpperCAmelCase = encoded_batch with torch.no_grad(): _UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase ).logits _UpperCAmelCase = out_logits[..., :-1, :].contiguous() _UpperCAmelCase = labels[..., 1:].contiguous() _UpperCAmelCase = attn_mask[..., 1:].contiguous() _UpperCAmelCase = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , __UpperCamelCase ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(__UpperCamelCase )}
260
1
"""simple docstring""" from __future__ import annotations __A : str = [] def lowercase ( _SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' for i in range(len(_SCREAMING_SNAKE_CASE ) ): if board[row][i] == 1: return False for i in range(len(_SCREAMING_SNAKE_CASE ) ): if board[i][column] == 1: return False for i, j in zip(range(_SCREAMING_SNAKE_CASE , -1 , -1 ) , range(_SCREAMING_SNAKE_CASE , -1 , -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(_SCREAMING_SNAKE_CASE , -1 , -1 ) , range(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) ) ): if board[i][j] == 1: return False return True def lowercase ( _SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if row >= len(_SCREAMING_SNAKE_CASE ): solution.append(_SCREAMING_SNAKE_CASE ) printboard(_SCREAMING_SNAKE_CASE ) print() return True for i in range(len(_SCREAMING_SNAKE_CASE ) ): if is_safe(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 1 solve(_SCREAMING_SNAKE_CASE , row + 1 ) _UpperCAmelCase = 0 return False def lowercase ( _SCREAMING_SNAKE_CASE : list[list[int]] ): '''simple docstring''' for i in range(len(_SCREAMING_SNAKE_CASE ) ): for j in range(len(_SCREAMING_SNAKE_CASE ) ): if board[i][j] == 1: print('''Q''' , end=''' ''' ) else: print('''.''' , end=''' ''' ) print() # n=int(input("The no. of queens")) __A : List[Any] = 8 __A : List[str] = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print("The total no. of solutions are :", len(solution))
260
"""simple docstring""" import pytest import datasets # Import fixture modules as plugins __A : int = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"] def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' for item in items: if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ): continue item.add_marker(pytest.mark.unit ) def lowercase ( _SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' _UpperCAmelCase = tmp_path_factory.getbasetemp() / '''cache''' _UpperCAmelCase = test_hf_cache_home / '''datasets''' _UpperCAmelCase = test_hf_cache_home / '''metrics''' _UpperCAmelCase = test_hf_cache_home / '''modules''' monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = test_hf_datasets_cache / '''downloads''' monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = test_hf_datasets_cache / '''downloads''' / '''extracted''' monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_SCREAMING_SNAKE_CASE ) ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE , scope='''session''' ) def lowercase ( ): '''simple docstring''' datasets.disable_progress_bar() @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , _SCREAMING_SNAKE_CASE ) @pytest.fixture def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , _SCREAMING_SNAKE_CASE )
260
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A : Dict = logging.get_logger(__name__) __A : List[Any] = { "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json", # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """gpt_neox""" def __init__( self : Tuple , __UpperCamelCase : Tuple=5_0_4_3_2 , __UpperCamelCase : Optional[int]=6_1_4_4 , __UpperCamelCase : Optional[int]=4_4 , __UpperCamelCase : str=6_4 , __UpperCamelCase : Tuple=2_4_5_7_6 , __UpperCamelCase : Optional[Any]="gelu" , __UpperCamelCase : List[str]=0.2_5 , __UpperCamelCase : Dict=1_0_0_0_0 , __UpperCamelCase : str=0.0 , __UpperCamelCase : Optional[int]=0.0 , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : List[Any]=2_0_4_8 , __UpperCamelCase : Dict=0.0_2 , __UpperCamelCase : str=1e-5 , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Union[str, Any]=0 , __UpperCamelCase : Tuple=2 , __UpperCamelCase : Tuple=False , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Optional[int]=None , **__UpperCamelCase : Dict , )->List[Any]: super().__init__(bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase ) _UpperCAmelCase = vocab_size _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = rotary_pct _UpperCAmelCase = rotary_emb_base _UpperCAmelCase = attention_dropout _UpperCAmelCase = hidden_dropout _UpperCAmelCase = classifier_dropout _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = use_cache _UpperCAmelCase = tie_word_embeddings _UpperCAmelCase = use_parallel_residual _UpperCAmelCase = rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( '''The hidden size is not divisble by the number of attention heads! Make sure to update them!''' ) def lowercase__ ( self : List[str] )->Optional[Any]: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __UpperCamelCase ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ''' F'got {self.rope_scaling}' ) _UpperCAmelCase = self.rope_scaling.get('''type''' , __UpperCamelCase ) _UpperCAmelCase = self.rope_scaling.get('''factor''' , __UpperCamelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' ) if rope_scaling_factor is None or not isinstance(__UpperCamelCase , __UpperCamelCase ) or rope_scaling_factor <= 1.0: raise ValueError(F'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
260
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : list ): '''simple docstring''' if len(_SCREAMING_SNAKE_CASE ) <= 1: return lst _UpperCAmelCase = 1 while i < len(_SCREAMING_SNAKE_CASE ): if lst[i - 1] <= lst[i]: i += 1 else: _UpperCAmelCase , _UpperCAmelCase = lst[i], lst[i - 1] i -= 1 if i == 0: _UpperCAmelCase = 1 return lst if __name__ == "__main__": __A : Dict = input("Enter numbers separated by a comma:\n").strip() __A : List[Any] = [int(item) for item in user_input.split(",")] print(gnome_sort(unsorted))
260
1
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' if height >= 1: move_tower(height - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) move_disk(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) move_tower(height - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' print('''moving disk from''' , _SCREAMING_SNAKE_CASE , '''to''' , _SCREAMING_SNAKE_CASE ) def lowercase ( ): '''simple docstring''' _UpperCAmelCase = int(input('''Height of hanoi: ''' ).strip() ) move_tower(_SCREAMING_SNAKE_CASE , '''A''' , '''B''' , '''C''' ) if __name__ == "__main__": main()
260
"""simple docstring""" import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version __A : int = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt") def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : int = 1_6000 ): '''simple docstring''' _UpperCAmelCase = int(round(sample_rate * max_length ) ) if len(_SCREAMING_SNAKE_CASE ) <= sample_length: return wav _UpperCAmelCase = randint(0 , len(_SCREAMING_SNAKE_CASE ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class _a : """simple docstring""" UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """Name of a dataset from the datasets package"""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """A file containing the training audio paths and labels."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """A file containing the validation audio paths and labels."""}) UpperCamelCase__ = field( default="""train""" , metadata={ """help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'""" } , ) UpperCamelCase__ = field( default="""validation""" , metadata={ """help""": ( """The name of the training data set split to use (via the datasets library). Defaults to 'validation'""" ) } , ) UpperCamelCase__ = field( default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , ) UpperCamelCase__ = field( default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. Defaults to 'label'"""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) UpperCamelCase__ = field( default=20 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = field( default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""}) UpperCamelCase__ = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Name or path of preprocessor config."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , ) def lowercase__ ( self : Optional[Any] )->int: if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( '''The argument `--freeze_feature_extractor` is deprecated and ''' '''will be removed in a future version. Use `--freeze_feature_encoder`''' '''instead. Setting `freeze_feature_encoder==True`.''' , __UpperCamelCase , ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( '''The argument `--freeze_feature_extractor` is deprecated and ''' '''should not be used in combination with `--freeze_feature_encoder`.''' '''Only make use of `--freeze_feature_encoder`.''' ) def lowercase ( ): '''simple docstring''' _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_audio_classification''' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(_SCREAMING_SNAKE_CASE ) transformers.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} ' + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. _UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. ' '''Use --overwrite_output_dir to train from scratch.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset and prepare it for the audio classification task. _UpperCAmelCase = DatasetDict() _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( f'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. ' '''Make sure to set `--audio_column_name` to the correct audio column - one of ''' f'{", ".join(raw_datasets["train"].column_names )}.' ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( f'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. ' '''Make sure to set `--label_column_name` to the correct text column - one of ''' f'{", ".join(raw_datasets["train"].column_names )}.' ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy _UpperCAmelCase = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. _UpperCAmelCase = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) _UpperCAmelCase = feature_extractor.model_input_names[0] def train_transforms(_SCREAMING_SNAKE_CASE : Tuple ): _UpperCAmelCase = [] for audio in batch[data_args.audio_column_name]: _UpperCAmelCase = random_subsample( audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=feature_extractor.sampling_rate ) _UpperCAmelCase = {model_input_name: inputs.get(_SCREAMING_SNAKE_CASE )} _UpperCAmelCase = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(_SCREAMING_SNAKE_CASE : Optional[int] ): _UpperCAmelCase = [audio['''array'''] for audio in batch[data_args.audio_column_name]] _UpperCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=feature_extractor.sampling_rate ) _UpperCAmelCase = {model_input_name: inputs.get(_SCREAMING_SNAKE_CASE )} _UpperCAmelCase = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. _UpperCAmelCase = raw_datasets['''train'''].features[data_args.label_column_name].names _UpperCAmelCase , _UpperCAmelCase = {}, {} for i, label in enumerate(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = str(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = label # Load the accuracy metric from the datasets package _UpperCAmelCase = evaluate.load('''accuracy''' ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. def compute_metrics(_SCREAMING_SNAKE_CASE : List[str] ): _UpperCAmelCase = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=_SCREAMING_SNAKE_CASE , references=eval_pred.label_ids ) _UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(_SCREAMING_SNAKE_CASE ) , labelaid=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _UpperCAmelCase = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: _UpperCAmelCase = ( raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(_SCREAMING_SNAKE_CASE , output_all_columns=_SCREAMING_SNAKE_CASE ) if training_args.do_eval: if data_args.max_eval_samples is not None: _UpperCAmelCase = ( raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(_SCREAMING_SNAKE_CASE , output_all_columns=_SCREAMING_SNAKE_CASE ) # Initialize our trainer _UpperCAmelCase = Trainer( model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , ) # Training if training_args.do_train: _UpperCAmelCase = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase = last_checkpoint _UpperCAmelCase = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _UpperCAmelCase = trainer.evaluate() trainer.log_metrics('''eval''' , _SCREAMING_SNAKE_CASE ) trainer.save_metrics('''eval''' , _SCREAMING_SNAKE_CASE ) # Write model card and (optionally) push to hub _UpperCAmelCase = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''audio-classification''', '''dataset''': data_args.dataset_name, '''tags''': ['''audio-classification'''], } if training_args.push_to_hub: trainer.push_to_hub(**_SCREAMING_SNAKE_CASE ) else: trainer.create_model_card(**_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
260
1
"""simple docstring""" import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs __A : Tuple = imread(r"digital_image_processing/image_data/lena_small.jpg") __A : Union[str, Any] = cvtColor(img, COLOR_BGR2GRAY) def lowercase ( ): '''simple docstring''' _UpperCAmelCase = cn.convert_to_negative(_SCREAMING_SNAKE_CASE ) # assert negative_img array for at least one True assert negative_img.any() def lowercase ( ): '''simple docstring''' with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img: # Work around assertion for response assert str(cc.change_contrast(_SCREAMING_SNAKE_CASE , 110 ) ).startswith( '''<PIL.Image.Image image mode=RGB size=100x100 at''' ) def lowercase ( ): '''simple docstring''' _UpperCAmelCase = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def lowercase ( ): '''simple docstring''' _UpperCAmelCase = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0 ) # assert ambiguous array for all == True assert canny_img.all() _UpperCAmelCase = canny.canny(_SCREAMING_SNAKE_CASE ) # assert canny array for at least one True assert canny_array.any() def lowercase ( ): '''simple docstring''' assert gg.gaussian_filter(_SCREAMING_SNAKE_CASE , 5 , sigma=0.9 ).all() def lowercase ( ): '''simple docstring''' _UpperCAmelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) _UpperCAmelCase = conv.img_convolve(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).astype(_SCREAMING_SNAKE_CASE ) assert res.any() def lowercase ( ): '''simple docstring''' assert med.median_filter(_SCREAMING_SNAKE_CASE , 3 ).any() def lowercase ( ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = sob.sobel_filter(_SCREAMING_SNAKE_CASE ) assert grad.any() and theta.any() def lowercase ( ): '''simple docstring''' _UpperCAmelCase = sp.make_sepia(_SCREAMING_SNAKE_CASE , 20 ) assert sepia.all() def lowercase ( _SCREAMING_SNAKE_CASE : str = "digital_image_processing/image_data/lena_small.jpg" ): '''simple docstring''' _UpperCAmelCase = bs.Burkes(imread(_SCREAMING_SNAKE_CASE , 1 ) , 120 ) burkes.process() assert burkes.output_img.any() def lowercase ( _SCREAMING_SNAKE_CASE : str = "digital_image_processing/image_data/lena_small.jpg" , ): '''simple docstring''' _UpperCAmelCase = rs.NearestNeighbour(imread(_SCREAMING_SNAKE_CASE , 1 ) , 400 , 200 ) nn.process() assert nn.output.any() def lowercase ( ): '''simple docstring''' _UpperCAmelCase = '''digital_image_processing/image_data/lena.jpg''' # Reading the image and converting it to grayscale. _UpperCAmelCase = imread(_SCREAMING_SNAKE_CASE , 0 ) # Test for get_neighbors_pixel function() return not None _UpperCAmelCase = 0 _UpperCAmelCase = 0 _UpperCAmelCase = image[x_coordinate][y_coordinate] _UpperCAmelCase = lbp.get_neighbors_pixel( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image _UpperCAmelCase = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): _UpperCAmelCase = lbp.local_binary_value(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert lbp_image.any()
260
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = (DPMSolverSinglestepScheduler,) UpperCamelCase__ = (("""num_inference_steps""", 25),) def lowercase__ ( self : Tuple , **__UpperCamelCase : Tuple )->Any: _UpperCAmelCase = { '''num_train_timesteps''': 1_0_0_0, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''prediction_type''': '''epsilon''', '''thresholding''': False, '''sample_max_value''': 1.0, '''algorithm_type''': '''dpmsolver++''', '''solver_type''': '''midpoint''', '''lambda_min_clipped''': -float('''inf''' ), '''variance_type''': None, } config.update(**__UpperCamelCase ) return config def lowercase__ ( self : Dict , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : Optional[int] )->Tuple: _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase , _UpperCAmelCase = sample, sample for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ): _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase__ ( self : Any )->Union[str, Any]: pass def lowercase__ ( self : str , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : List[Any] )->Dict: _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config() _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residual (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase__ ( self : int , __UpperCamelCase : List[str]=None , **__UpperCamelCase : Optional[int] )->List[Any]: if scheduler is None: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 1_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample return sample def lowercase__ ( self : List[Any] )->Dict: _UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = 5_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_5_7_4 ) < 1e-3 def lowercase__ ( self : Dict )->Dict: for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__UpperCamelCase ) def lowercase__ ( self : str )->Optional[Any]: # make sure that iterating over schedulers with same config names gives same results # for defaults _UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 _UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 def lowercase__ ( self : Union[str, Any] )->int: self.check_over_configs(thresholding=__UpperCamelCase ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , algorithm_type='''dpmsolver++''' , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , ) def lowercase__ ( self : str )->str: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCamelCase ) def lowercase__ ( self : List[Any] )->Tuple: for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , ) _UpperCAmelCase = self.full_loop( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , ) assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers" def lowercase__ ( self : Dict )->List[str]: self.check_over_configs(lower_order_final=__UpperCamelCase ) self.check_over_configs(lower_order_final=__UpperCamelCase ) def lowercase__ ( self : Dict )->str: self.check_over_configs(lambda_min_clipped=-float('''inf''' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def lowercase__ ( self : List[str] )->int: self.check_over_configs(variance_type=__UpperCamelCase ) self.check_over_configs(variance_type='''learned_range''' ) def lowercase__ ( self : List[str] )->Union[str, Any]: for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 ) def lowercase__ ( self : List[Any] )->int: _UpperCAmelCase = self.full_loop() _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 def lowercase__ ( self : List[str] )->List[str]: _UpperCAmelCase = self.full_loop(use_karras_sigmas=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_2_4_8 ) < 1e-3 def lowercase__ ( self : int )->List[Any]: _UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.1_4_5_3 ) < 1e-3 def lowercase__ ( self : Optional[Any] )->Dict: _UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.0_6_4_9 ) < 1e-3 def lowercase__ ( self : Union[str, Any] )->List[str]: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 1_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample assert sample.dtype == torch.floataa
260
1
"""simple docstring""" import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): __A : Union[str, Any] = { "linear": PIL.Image.Resampling.BILINEAR, "bilinear": PIL.Image.Resampling.BILINEAR, "bicubic": PIL.Image.Resampling.BICUBIC, "lanczos": PIL.Image.Resampling.LANCZOS, "nearest": PIL.Image.Resampling.NEAREST, } else: __A : List[str] = { "linear": PIL.Image.LINEAR, "bilinear": PIL.Image.BILINEAR, "bicubic": PIL.Image.BICUBIC, "lanczos": PIL.Image.LANCZOS, "nearest": PIL.Image.NEAREST, } def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = (images / 2 + 0.5).clamp(0 , 1 ) _UpperCAmelCase = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() _UpperCAmelCase = numpy_to_pil(_SCREAMING_SNAKE_CASE ) return images def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' if images.ndim == 3: _UpperCAmelCase = images[None, ...] _UpperCAmelCase = (images * 255).round().astype('''uint8''' ) if images.shape[-1] == 1: # special case for grayscale (single channel) images _UpperCAmelCase = [Image.fromarray(image.squeeze() , mode='''L''' ) for image in images] else: _UpperCAmelCase = [Image.fromarray(_SCREAMING_SNAKE_CASE ) for image in images] return pil_images
260
"""simple docstring""" from __future__ import annotations from math import pi from typing import Protocol import matplotlib.pyplot as plt import numpy as np class _a ( lowerCAmelCase): """simple docstring""" def lowercase__ ( self : List[Any] , __UpperCamelCase : float )->float: return 0.0 def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] ) _UpperCAmelCase = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] ) return lowest, highest def lowercase ( _SCREAMING_SNAKE_CASE : FilterType , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = 512 _UpperCAmelCase = [1] + [0] * (size - 1) _UpperCAmelCase = [filter_type.process(_SCREAMING_SNAKE_CASE ) for item in inputs] _UpperCAmelCase = [0] * (samplerate - size) # zero-padding outputs += filler _UpperCAmelCase = np.abs(np.fft.fft(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = 20 * np.logaa(_SCREAMING_SNAKE_CASE ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) # Display within reasonable bounds _UpperCAmelCase = get_bounds(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) ) plt.ylabel('''Gain (dB)''' ) plt.plot(_SCREAMING_SNAKE_CASE ) plt.show() def lowercase ( _SCREAMING_SNAKE_CASE : FilterType , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = 512 _UpperCAmelCase = [1] + [0] * (size - 1) _UpperCAmelCase = [filter_type.process(_SCREAMING_SNAKE_CASE ) for item in inputs] _UpperCAmelCase = [0] * (samplerate - size) # zero-padding outputs += filler _UpperCAmelCase = np.angle(np.fft.fft(_SCREAMING_SNAKE_CASE ) ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) plt.ylim(-2 * pi , 2 * pi ) plt.ylabel('''Phase shift (Radians)''' ) plt.plot(np.unwrap(_SCREAMING_SNAKE_CASE , -2 * pi ) ) plt.show()
260
1
"""simple docstring""" import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class _a ( lowerCAmelCase): """simple docstring""" def lowercase__ ( self : List[Any] )->Optional[int]: _UpperCAmelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__UpperCamelCase , '''tf_padding''' ) ) self.parent.assertTrue(hasattr(__UpperCamelCase , '''depth_multiplier''' ) ) class _a : """simple docstring""" def __init__( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : str=1_3 , __UpperCamelCase : int=3 , __UpperCamelCase : Union[str, Any]=3_2 , __UpperCamelCase : Optional[Any]=0.2_5 , __UpperCamelCase : Tuple=8 , __UpperCamelCase : Tuple=True , __UpperCamelCase : List[Any]=1_0_2_4 , __UpperCamelCase : List[str]=3_2 , __UpperCamelCase : Dict="relu6" , __UpperCamelCase : str=0.1 , __UpperCamelCase : int=0.0_2 , __UpperCamelCase : List[str]=True , __UpperCamelCase : Tuple=True , __UpperCamelCase : Optional[int]=1_0 , __UpperCamelCase : Optional[Any]=None , )->Tuple: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = depth_multiplier _UpperCAmelCase = min_depth _UpperCAmelCase = tf_padding _UpperCAmelCase = int(last_hidden_size * depth_multiplier ) _UpperCAmelCase = output_stride _UpperCAmelCase = hidden_act _UpperCAmelCase = classifier_dropout_prob _UpperCAmelCase = use_labels _UpperCAmelCase = is_training _UpperCAmelCase = num_labels _UpperCAmelCase = initializer_range _UpperCAmelCase = scope def lowercase__ ( self : Tuple )->str: _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels, pixel_labels def lowercase__ ( self : Any )->Optional[int]: return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def lowercase__ ( self : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] )->Union[str, Any]: _UpperCAmelCase = MobileNetVaModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] )->int: _UpperCAmelCase = self.num_labels _UpperCAmelCase = MobileNetVaForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self : Union[str, Any] )->Optional[Any]: _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _a ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase): """simple docstring""" UpperCamelCase__ = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else () UpperCamelCase__ = ( {"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification} if is_torch_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def lowercase__ ( self : List[str] )->List[str]: _UpperCAmelCase = MobileNetVaModelTester(self ) _UpperCAmelCase = MobileNetVaConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->str: self.config_tester.run_common_tests() @unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' ) def lowercase__ ( self : Dict )->Union[str, Any]: pass @unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' ) def lowercase__ ( self : Any )->Tuple: pass @unittest.skip(reason='''MobileNetV1 does not output attentions''' ) def lowercase__ ( self : Optional[int] )->str: pass def lowercase__ ( self : Union[str, Any] )->str: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(__UpperCamelCase ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def lowercase__ ( self : Dict )->List[Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def lowercase__ ( self : List[str] )->List[str]: def check_hidden_states_output(__UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] ): _UpperCAmelCase = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = 2_6 self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = True check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Optional[int] )->int: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase ) @slow def lowercase__ ( self : Tuple )->Optional[Any]: for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = MobileNetVaModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def lowercase ( ): '''simple docstring''' _UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class _a ( unittest.TestCase): """simple docstring""" @cached_property def lowercase__ ( self : List[Any] )->List[str]: return ( MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None ) @slow def lowercase__ ( self : List[Any] )->Dict: _UpperCAmelCase = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(__UpperCamelCase ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors='''pt''' ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**__UpperCamelCase ) # verify the logits _UpperCAmelCase = torch.Size((1, 1_0_0_1) ) self.assertEqual(outputs.logits.shape , __UpperCamelCase ) _UpperCAmelCase = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
260
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A : Union[str, Any] = logging.get_logger(__name__) __A : Dict = { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json", "umberto-commoncrawl-cased-v1": ( "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json" ), "umberto-wikipedia-uncased-v1": ( "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json" ), } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """camembert""" def __init__( self : List[str] , __UpperCamelCase : Union[str, Any]=3_0_5_2_2 , __UpperCamelCase : Optional[Any]=7_6_8 , __UpperCamelCase : Optional[int]=1_2 , __UpperCamelCase : Union[str, Any]=1_2 , __UpperCamelCase : List[Any]=3_0_7_2 , __UpperCamelCase : Dict="gelu" , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : int=0.1 , __UpperCamelCase : int=5_1_2 , __UpperCamelCase : Dict=2 , __UpperCamelCase : int=0.0_2 , __UpperCamelCase : int=1e-12 , __UpperCamelCase : Optional[Any]=1 , __UpperCamelCase : Dict=0 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : Any="absolute" , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : str=None , **__UpperCamelCase : Optional[Any] , )->str: super().__init__(pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = position_embedding_type _UpperCAmelCase = use_cache _UpperCAmelCase = classifier_dropout class _a ( lowerCAmelCase): """simple docstring""" @property def lowercase__ ( self : int )->Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _UpperCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
260
1
"""simple docstring""" from .imports import is_tqdm_available if is_tqdm_available(): from tqdm.auto import tqdm as _tqdm from ..state import PartialState def lowercase ( _SCREAMING_SNAKE_CASE : bool = True , *_SCREAMING_SNAKE_CASE : int , **_SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' if not is_tqdm_available(): raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' ) _UpperCAmelCase = False if main_process_only: _UpperCAmelCase = PartialState().local_process_index == 0 return _tqdm(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , disable=_SCREAMING_SNAKE_CASE )
260
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A : Tuple = logging.get_logger(__name__) __A : List[str] = { "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """poolformer""" def __init__( self : List[str] , __UpperCamelCase : int=3 , __UpperCamelCase : List[Any]=1_6 , __UpperCamelCase : str=1_6 , __UpperCamelCase : List[Any]=3 , __UpperCamelCase : int=4.0 , __UpperCamelCase : str=[2, 2, 6, 2] , __UpperCamelCase : Tuple=[6_4, 1_2_8, 3_2_0, 5_1_2] , __UpperCamelCase : int=[7, 3, 3, 3] , __UpperCamelCase : str=[4, 2, 2, 2] , __UpperCamelCase : Union[str, Any]=[2, 1, 1, 1] , __UpperCamelCase : List[str]=4 , __UpperCamelCase : List[str]=0.0 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : List[str]=True , __UpperCamelCase : Union[str, Any]=1e-5 , __UpperCamelCase : str=0.0_2 , **__UpperCamelCase : List[Any] , )->Dict: _UpperCAmelCase = num_channels _UpperCAmelCase = patch_size _UpperCAmelCase = stride _UpperCAmelCase = padding _UpperCAmelCase = pool_size _UpperCAmelCase = hidden_sizes _UpperCAmelCase = mlp_ratio _UpperCAmelCase = depths _UpperCAmelCase = patch_sizes _UpperCAmelCase = strides _UpperCAmelCase = num_encoder_blocks _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_layer_scale _UpperCAmelCase = layer_scale_init_value _UpperCAmelCase = initializer_range super().__init__(**__UpperCamelCase ) class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = version.parse("""1.11""") @property def lowercase__ ( self : Union[str, Any] )->Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowercase__ ( self : Tuple )->float: return 2e-3
260
1
"""simple docstring""" from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class _a ( lowerCAmelCase , lowerCAmelCase): """simple docstring""" @register_to_config def __init__( self : Optional[Any] , __UpperCamelCase : int = 7_6_8 , )->List[str]: super().__init__() _UpperCAmelCase = nn.Parameter(torch.zeros(1 , __UpperCamelCase ) ) _UpperCAmelCase = nn.Parameter(torch.ones(1 , __UpperCamelCase ) ) def lowercase__ ( self : Dict , __UpperCamelCase : Optional[Union[str, torch.device]] = None , __UpperCamelCase : Optional[torch.dtype] = None , )->List[Any]: _UpperCAmelCase = nn.Parameter(self.mean.to(__UpperCamelCase ).to(__UpperCamelCase ) ) _UpperCAmelCase = nn.Parameter(self.std.to(__UpperCamelCase ).to(__UpperCamelCase ) ) return self def lowercase__ ( self : str , __UpperCamelCase : Any )->List[Any]: _UpperCAmelCase = (embeds - self.mean) * 1.0 / self.std return embeds def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : List[str] )->Dict: _UpperCAmelCase = (embeds * self.std) + self.mean return embeds
260
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __A : Union[str, Any] = 16 __A : Optional[Any] = 32 def lowercase ( _SCREAMING_SNAKE_CASE : Accelerator , _SCREAMING_SNAKE_CASE : int = 16 ): '''simple docstring''' _UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' ) _UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(_SCREAMING_SNAKE_CASE : Optional[int] ): # max_length=None => use the model max length (it's actually the default) _UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _UpperCAmelCase = datasets.map( _SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(_SCREAMING_SNAKE_CASE : List[str] ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCAmelCase = 16 elif accelerator.mixed_precision != "no": _UpperCAmelCase = 8 else: _UpperCAmelCase = None return tokenizer.pad( _SCREAMING_SNAKE_CASE , padding='''longest''' , max_length=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' , ) # Instantiate dataloaders. _UpperCAmelCase = DataLoader( tokenized_datasets['''train'''] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = DataLoader( tokenized_datasets['''validation'''] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders __A : Optional[int] = mocked_dataloaders # noqa: F811 def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , _SCREAMING_SNAKE_CASE ) == "1": _UpperCAmelCase = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: _UpperCAmelCase = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir ) else: _UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCAmelCase = config['''lr'''] _UpperCAmelCase = int(config['''num_epochs'''] ) _UpperCAmelCase = int(config['''seed'''] ) _UpperCAmelCase = int(config['''batch_size'''] ) set_seed(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase , _UpperCAmelCase = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation _UpperCAmelCase = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE _UpperCAmelCase = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=_SCREAMING_SNAKE_CASE ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCAmelCase = model.to(accelerator.device ) # Instantiate optimizer _UpperCAmelCase = AdamW(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE ) # Instantiate scheduler _UpperCAmelCase = get_linear_schedule_with_warmup( optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(_SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # New Code # # We need to initialize the trackers we use. Overall configurations can also be stored if args.with_tracking: _UpperCAmelCase = os.path.split(_SCREAMING_SNAKE_CASE )[-1].split('''.''' )[0] accelerator.init_trackers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Now we train the model for epoch in range(_SCREAMING_SNAKE_CASE ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: _UpperCAmelCase = 0 for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() _UpperCAmelCase = loss / gradient_accumulation_steps accelerator.backward(_SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.logits.argmax(dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'epoch {epoch}:' , _SCREAMING_SNAKE_CASE ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { '''accuracy''': eval_metric['''accuracy'''], '''f1''': eval_metric['''f1'''], '''train_loss''': total_loss.item() / len(_SCREAMING_SNAKE_CASE ), '''epoch''': epoch, } , step=_SCREAMING_SNAKE_CASE , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def lowercase ( ): '''simple docstring''' _UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) parser.add_argument( '''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , ) parser.add_argument( '''--project_dir''' , type=_SCREAMING_SNAKE_CASE , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
260
1
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """naver-clova-ix/donut-base-finetuned-docvqa""" UpperCamelCase__ = ( """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """ """should be the document containing the information, as well as a `question` that is the question about the """ """document. It returns a text that contains the answer to the question.""" ) UpperCamelCase__ = """document_qa""" UpperCamelCase__ = AutoProcessor UpperCamelCase__ = VisionEncoderDecoderModel UpperCamelCase__ = ["""image""", """text"""] UpperCamelCase__ = ["""text"""] def __init__( self : str , *__UpperCamelCase : List[str] , **__UpperCamelCase : Union[str, Any] )->str: if not is_vision_available(): raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' ) super().__init__(*__UpperCamelCase , **__UpperCamelCase ) def lowercase__ ( self : Tuple , __UpperCamelCase : "Image" , __UpperCamelCase : str )->Union[str, Any]: _UpperCAmelCase = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>''' _UpperCAmelCase = task_prompt.replace('''{user_input}''' , __UpperCamelCase ) _UpperCAmelCase = self.pre_processor.tokenizer( __UpperCamelCase , add_special_tokens=__UpperCamelCase , return_tensors='''pt''' ).input_ids _UpperCAmelCase = self.pre_processor(__UpperCamelCase , return_tensors='''pt''' ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Any )->Optional[Any]: return self.model.generate( inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__UpperCamelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__UpperCamelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__UpperCamelCase , ).sequences def lowercase__ ( self : str , __UpperCamelCase : Any )->Optional[Any]: _UpperCAmelCase = self.pre_processor.batch_decode(__UpperCamelCase )[0] _UpperCAmelCase = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' ) _UpperCAmelCase = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' ) _UpperCAmelCase = re.sub(r'''<.*?>''' , '''''' , __UpperCamelCase , count=1 ).strip() # remove first task start token _UpperCAmelCase = self.pre_processor.tokenajson(__UpperCamelCase ) return sequence["answer"]
260
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : set ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ), len(grid[0] ) if ( min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) _UpperCAmelCase = 0 count += depth_first_search(_SCREAMING_SNAKE_CASE , row + 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , row - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col + 1 , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col - 1 , _SCREAMING_SNAKE_CASE ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
260
1
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff) # formula for sum of series return total def lowercase ( ): '''simple docstring''' print(sum_of_series(1 , 1 , 10 ) ) if __name__ == "__main__": import doctest doctest.testmod()
260
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() ) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' _UpperCAmelCase = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue _UpperCAmelCase = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' ) _UpperCAmelCase = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' ) _UpperCAmelCase = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' ) _UpperCAmelCase = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' ) _UpperCAmelCase = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' ) _UpperCAmelCase = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' ) _UpperCAmelCase = key.replace('''image_encoder.module''' , '''flava.image_model''' ) _UpperCAmelCase = key.replace('''text_encoder.module''' , '''flava.text_model''' ) _UpperCAmelCase = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' ) _UpperCAmelCase = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' ) _UpperCAmelCase = key.replace('''text_projection''' , '''flava.text_projection''' ) _UpperCAmelCase = key.replace('''image_projection''' , '''flava.image_projection''' ) _UpperCAmelCase = value.float() for key, value in codebook_state_dict.items(): _UpperCAmelCase = value return upgrade @torch.no_grad() def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int]=None ): '''simple docstring''' if config_path is not None: _UpperCAmelCase = FlavaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = FlavaConfig() _UpperCAmelCase = FlavaForPreTraining(_SCREAMING_SNAKE_CASE ).eval() _UpperCAmelCase = convert_dalle_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , save_checkpoint=_SCREAMING_SNAKE_CASE ) if os.path.exists(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' ) else: _UpperCAmelCase = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='''cpu''' ) _UpperCAmelCase = upgrade_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) hf_model.load_state_dict(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = hf_model.state_dict() _UpperCAmelCase = count_parameters(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = count_parameters(_SCREAMING_SNAKE_CASE ) + count_parameters(_SCREAMING_SNAKE_CASE ) assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A : Dict = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") __A : Optional[Any] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
260
1
from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time UpperCAmelCase__ = Lock() def _a ( a :Optional[int] , a :List[Any] , a :List[str] , a :Union[str, Any] , a :Tuple , a :Union[str, Any] , a :List[Any] ) -> Tuple: global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(a ) process_lock.release() # receive your right neighbor's value process_lock.acquire() a = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left a = min(a , a ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(a ) process_lock.release() # receive your left neighbor's value process_lock.acquire() a = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right a = max(a , a ) # after all swaps are performed, send the values back to main result_pipe[1].send(a ) def _a ( a :Union[str, Any] ) -> Optional[Any]: a = [] a = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop a = Pipe() a = Pipe() process_array_.append( Process( target=a , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) a = temp_rs a = temp_rr for i in range(1 , len(a ) - 1 ): a = Pipe() a = Pipe() process_array_.append( Process( target=a , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) a = temp_rs a = temp_rr process_array_.append( Process( target=a , args=( len(a ) - 1, arr[len(a ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(a ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(a ) ): a = result_pipe[p][0].recv() process_array_[p].join() return arr def _a ( ) -> int: a = list(range(10 , 0 , -1 ) ) print('''Initial List''' ) print(*a ) a = odd_even_transposition(a ) print('''Sorted List\n''' ) print(*a ) if __name__ == "__main__": main()
0
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowercase ( _SCREAMING_SNAKE_CASE : Features ): '''simple docstring''' _UpperCAmelCase = np.inf def set_batch_size(_SCREAMING_SNAKE_CASE : FeatureType ) -> None: nonlocal batch_size if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and feature.dtype == "binary": _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return None if batch_size is np.inf else batch_size class _a ( lowerCAmelCase): """simple docstring""" def __init__( self : Optional[Any] , __UpperCamelCase : NestedDataStructureLike[PathLike] , __UpperCamelCase : Optional[NamedSplit] = None , __UpperCamelCase : Optional[Features] = None , __UpperCamelCase : str = None , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[int] = None , **__UpperCamelCase : int , )->Union[str, Any]: super().__init__( __UpperCamelCase , split=__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase , streaming=__UpperCamelCase , num_proc=__UpperCamelCase , **__UpperCamelCase , ) _UpperCAmelCase = path_or_paths if isinstance(__UpperCamelCase , __UpperCamelCase ) else {self.split: path_or_paths} _UpperCAmelCase = _PACKAGED_DATASETS_MODULES['''parquet'''][1] _UpperCAmelCase = Parquet( cache_dir=__UpperCamelCase , data_files=__UpperCamelCase , features=__UpperCamelCase , hash=__UpperCamelCase , **__UpperCamelCase , ) def lowercase__ ( self : Union[str, Any] )->Dict: # Build iterable dataset if self.streaming: _UpperCAmelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None self.builder.download_and_prepare( download_config=__UpperCamelCase , download_mode=__UpperCamelCase , verification_mode=__UpperCamelCase , base_path=__UpperCamelCase , num_proc=self.num_proc , ) _UpperCAmelCase = self.builder.as_dataset( split=self.split , verification_mode=__UpperCamelCase , in_memory=self.keep_in_memory ) return dataset class _a : """simple docstring""" def __init__( self : Optional[int] , __UpperCamelCase : Dataset , __UpperCamelCase : Union[PathLike, BinaryIO] , __UpperCamelCase : Optional[int] = None , **__UpperCamelCase : Tuple , )->Optional[int]: _UpperCAmelCase = dataset _UpperCAmelCase = path_or_buf _UpperCAmelCase = batch_size or get_writer_batch_size(dataset.features ) _UpperCAmelCase = parquet_writer_kwargs def lowercase__ ( self : Optional[int] )->int: _UpperCAmelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , '''wb+''' ) as buffer: _UpperCAmelCase = self._write(file_obj=__UpperCamelCase , batch_size=__UpperCamelCase , **self.parquet_writer_kwargs ) else: _UpperCAmelCase = self._write(file_obj=self.path_or_buf , batch_size=__UpperCamelCase , **self.parquet_writer_kwargs ) return written def lowercase__ ( self : int , __UpperCamelCase : BinaryIO , __UpperCamelCase : int , **__UpperCamelCase : int )->int: _UpperCAmelCase = 0 _UpperCAmelCase = parquet_writer_kwargs.pop('''path_or_buf''' , __UpperCamelCase ) _UpperCAmelCase = self.dataset.features.arrow_schema _UpperCAmelCase = pq.ParquetWriter(__UpperCamelCase , schema=__UpperCamelCase , **__UpperCamelCase ) for offset in logging.tqdm( range(0 , len(self.dataset ) , __UpperCamelCase ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ): _UpperCAmelCase = query_table( table=self.dataset._data , key=slice(__UpperCamelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(__UpperCamelCase ) written += batch.nbytes writer.close() return written
260
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE_: List[Any] ={ 'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_: List[str] =[ 'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST', 'PegasusXForConditionalGeneration', 'PegasusXModel', 'PegasusXPreTrainedModel', ] if TYPE_CHECKING: from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pegasus_x import ( PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE_: Optional[Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
1
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str = " " ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = 0 for index, char in enumerate(_SCREAMING_SNAKE_CASE ): if char == separator: split_words.append(string[last_index:index] ) _UpperCAmelCase = index + 1 elif index + 1 == len(_SCREAMING_SNAKE_CASE ): split_words.append(string[last_index : index + 1] ) return split_words if __name__ == "__main__": from doctest import testmod testmod()
260
0
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __lowerCAmelCase : '''simple docstring''' def __init__(self : Union[str, Any] , UpperCamelCase : List[str] , UpperCamelCase : str=3 , UpperCamelCase : Optional[int]=32 , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : str=10 , UpperCamelCase : Any=[10, 20, 30, 40] , UpperCamelCase : List[Any]=[1, 1, 2, 1] , UpperCamelCase : Optional[int]=True , UpperCamelCase : str=True , UpperCamelCase : Optional[int]="relu" , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : Dict=None , ): '''simple docstring''' lowercase__ = parent lowercase__ = batch_size lowercase__ = image_size lowercase__ = num_channels lowercase__ = embeddings_size lowercase__ = hidden_sizes lowercase__ = depths lowercase__ = is_training lowercase__ = use_labels lowercase__ = hidden_act lowercase__ = num_labels lowercase__ = scope lowercase__ = len(UpperCamelCase ) def UpperCamelCase__ (self : Any ): '''simple docstring''' lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.num_labels ) lowercase__ = self.get_config() return config, pixel_values, labels def UpperCamelCase__ (self : str ): '''simple docstring''' return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : str , UpperCamelCase : Optional[Any] ): '''simple docstring''' lowercase__ = TFResNetModel(config=UpperCamelCase ) lowercase__ = model(UpperCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def UpperCamelCase__ (self : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] ): '''simple docstring''' lowercase__ = self.num_labels lowercase__ = TFResNetForImageClassification(UpperCamelCase ) lowercase__ = model(UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.prepare_config_and_inputs() lowercase__ ,lowercase__ ,lowercase__ = config_and_inputs lowercase__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class __lowerCAmelCase (lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : List[Any] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () lowerCAmelCase__ : str = ( {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification} if is_tf_available() else {} ) lowerCAmelCase__ : List[Any] = False lowerCAmelCase__ : int = False lowerCAmelCase__ : Tuple = False lowerCAmelCase__ : int = False lowerCAmelCase__ : Optional[int] = False def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = TFResNetModelTester(self ) lowercase__ = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase ) def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' return @unittest.skip(reason='''ResNet does not use inputs_embeds''' ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' pass @unittest.skip(reason='''ResNet does not support input and output embeddings''' ) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' pass def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ ,lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase ) lowercase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCamelCase ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' def check_hidden_states_output(UpperCamelCase : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] ): lowercase__ = model_class(UpperCamelCase ) lowercase__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) lowercase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase__ = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowercase__ ,lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: lowercase__ = layer_type lowercase__ = True check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ = True check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase ) @slow def UpperCamelCase__ (self : Any ): '''simple docstring''' for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = TFResNetModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) def _SCREAMING_SNAKE_CASE () -> Dict: """simple docstring""" lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase__ (self : int ): '''simple docstring''' return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCamelCase__ (self : Dict ): '''simple docstring''' lowercase__ = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowercase__ = self.default_image_processor lowercase__ = prepare_img() lowercase__ = image_processor(images=UpperCamelCase , return_tensors='''tf''' ) # forward pass lowercase__ = model(**UpperCamelCase ) # verify the logits lowercase__ = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase ) lowercase__ = tf.constant([-11.10_69, -9.78_77, -8.37_77] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , UpperCamelCase , atol=1E-4 ) )
2
"""simple docstring""" import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def lowercase ( _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' _UpperCAmelCase = args.pruning_method _UpperCAmelCase = args.threshold _UpperCAmelCase = args.model_name_or_path.rstrip('''/''' ) _UpperCAmelCase = args.target_model_path print(f'Load fine-pruned model from {model_name_or_path}' ) _UpperCAmelCase = torch.load(os.path.join(_SCREAMING_SNAKE_CASE , '''pytorch_model.bin''' ) ) _UpperCAmelCase = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) elif "classifier" in name or "qa_output" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) elif "bias" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) else: if pruning_method == "magnitude": _UpperCAmelCase = MagnitudeBinarizer.apply(inputs=_SCREAMING_SNAKE_CASE , threshold=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "topK": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase = TopKBinarizer.apply(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase = ThresholdBinarizer.apply(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "l0": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase , _UpperCAmelCase = -0.1, 1.1 _UpperCAmelCase = torch.sigmoid(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = s * (r - l) + l _UpperCAmelCase = s_bar.clamp(min=0.0 , max=1.0 ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) else: raise ValueError('''Unknown pruning method''' ) if target_model_path is None: _UpperCAmelCase = os.path.join( os.path.dirname(_SCREAMING_SNAKE_CASE ) , f'bertarized_{os.path.basename(_SCREAMING_SNAKE_CASE )}' ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): shutil.copytree(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print(f'\nCreated folder {target_model_path}' ) torch.save(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , '''pytorch_model.bin''' ) ) print('''\nPruned model saved! See you later!''' ) if __name__ == "__main__": __A : Tuple = argparse.ArgumentParser() parser.add_argument( "--pruning_method", choices=["l0", "magnitude", "topK", "sigmoied_threshold"], type=str, required=True, help=( "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning," " sigmoied_threshold = Soft movement pruning)" ), ) parser.add_argument( "--threshold", type=float, required=False, help=( "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model." "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared." "Not needed for `l0`" ), ) parser.add_argument( "--model_name_or_path", type=str, required=True, help="Folder containing the model that was previously fine-pruned", ) parser.add_argument( "--target_model_path", default=None, type=str, required=False, help="Folder containing the model that was previously fine-pruned", ) __A : Optional[int] = parser.parse_args() main(args)
260
0
'''simple docstring''' import os def lowerCAmelCase_ ( ): '''simple docstring''' A : List[Any] = os.path.join(os.path.dirname(snake_case__ ) , '''num.txt''' ) with open(snake_case__ ) as file_hand: return str(sum(int(snake_case__ ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
3
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) while cur > 1: # Find the maximum number in arr _UpperCAmelCase = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi _UpperCAmelCase = arr[mi::-1] + arr[mi + 1 : len(_SCREAMING_SNAKE_CASE )] # Reverse whole list _UpperCAmelCase = arr[cur - 1 :: -1] + arr[cur : len(_SCREAMING_SNAKE_CASE )] cur -= 1 return arr if __name__ == "__main__": __A : List[str] = input("Enter numbers separated by a comma:\n").strip() __A : List[Any] = [int(item) for item in user_input.split(",")] print(pancake_sort(unsorted))
260
0
'''simple docstring''' class UpperCAmelCase_ : def __init__( self : List[str] , UpperCAmelCase__ : Optional[int] ) -> str: # we need a list not a string, so do something to change the type lowerCAmelCase = arr.split(',' ) def __UpperCAmelCase ( self : Union[str, Any] ) -> int: lowerCAmelCase = [int(self.array[0] )] * len(self.array ) lowerCAmelCase = [int(self.array[0] )] * len(self.array ) for i in range(1 , len(self.array ) ): lowerCAmelCase = max( int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) ) lowerCAmelCase = max(sum_value[i] , rear[i - 1] ) return rear[len(self.array ) - 1] if __name__ == "__main__": __snake_case =input("""please input some numbers:""") __snake_case =SubArray(whole_array) __snake_case =array.solve_sub_array() print(("""the results is:""", re))
4
"""simple docstring""" from pathlib import Path import numpy as np from PIL import Image def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2989 * r + 0.5870 * g + 0.1140 * b def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' return (gray > 127) & (gray <= 255) def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' _UpperCAmelCase = np.zeros_like(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image _UpperCAmelCase = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): _UpperCAmelCase = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() _UpperCAmelCase = int(summation > 0 ) return output if __name__ == "__main__": # read original image __A : str = Path(__file__).resolve().parent / "image_data" / "lena.jpg" __A : str = np.array(Image.open(lena_path)) # kernel to be applied __A : List[Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) __A : Optional[Any] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image __A : Optional[Any] = Image.fromarray(output).convert("RGB") pil_img.save("result_dilation.png")
260
0
import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 UpperCAmelCase__ = { '''return_dict''': False, '''output_hidden_states''': True, '''output_attentions''': True, '''torchscript''': True, '''torch_dtype''': '''float16''', '''use_bfloat16''': True, '''tf_legacy_loss''': True, '''pruned_heads''': {'''a''': 1}, '''tie_word_embeddings''': False, '''is_decoder''': True, '''cross_attention_hidden_size''': 128, '''add_cross_attention''': True, '''tie_encoder_decoder''': True, '''max_length''': 50, '''min_length''': 3, '''do_sample''': True, '''early_stopping''': True, '''num_beams''': 3, '''num_beam_groups''': 3, '''diversity_penalty''': 0.5, '''temperature''': 2.0, '''top_k''': 10, '''top_p''': 0.7, '''typical_p''': 0.2, '''repetition_penalty''': 0.8, '''length_penalty''': 0.8, '''no_repeat_ngram_size''': 5, '''encoder_no_repeat_ngram_size''': 5, '''bad_words_ids''': [1, 2, 3], '''num_return_sequences''': 3, '''chunk_size_feed_forward''': 5, '''output_scores''': True, '''return_dict_in_generate''': True, '''forced_bos_token_id''': 2, '''forced_eos_token_id''': 3, '''remove_invalid_values''': True, '''architectures''': ['''BertModel'''], '''finetuning_task''': '''translation''', '''id2label''': {0: '''label'''}, '''label2id''': {'''label''': '''0'''}, '''tokenizer_class''': '''BertTokenizerFast''', '''prefix''': '''prefix''', '''bos_token_id''': 6, '''pad_token_id''': 7, '''eos_token_id''': 8, '''sep_token_id''': 9, '''decoder_start_token_id''': 10, '''exponential_decay_length_penalty''': (5, 1.01), '''suppress_tokens''': [0, 1], '''begin_suppress_tokens''': 2, '''task_specific_params''': {'''translation''': '''some_params'''}, '''problem_type''': '''regression''', } @is_staging_test class lowerCamelCase__ ( unittest.TestCase): @classmethod def __A (cls ) -> List[str]: _lowercase =TOKEN HfFolder.save_token(UpperCAmelCase ) @classmethod def __A (cls ) -> Dict: try: delete_repo(token=cls._token , repo_id='''test-config''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-config''' ) except HTTPError: pass def __A (self ) -> Dict: _lowercase =BertConfig( vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 ) config.push_to_hub('''test-config''' , use_auth_token=self._token ) _lowercase =BertConfig.from_pretrained(f"{USER}/test-config" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-config''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(UpperCAmelCase , repo_id='''test-config''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token ) _lowercase =BertConfig.from_pretrained(f"{USER}/test-config" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) ) def __A (self ) -> Optional[Any]: _lowercase =BertConfig( vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 ) config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token ) _lowercase =BertConfig.from_pretrained('''valid_org/test-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( UpperCAmelCase , repo_id='''valid_org/test-config-org''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token ) _lowercase =BertConfig.from_pretrained('''valid_org/test-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) ) def __A (self ) -> str: CustomConfig.register_for_auto_class() _lowercase =CustomConfig(attribute=4_2 ) config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} ) _lowercase =AutoConfig.from_pretrained(f"{USER}/test-dynamic-config" , trust_remote_code=UpperCAmelCase ) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' ) self.assertEqual(new_config.attribute , 4_2 ) class lowerCamelCase__ ( unittest.TestCase): def __A (self ) -> Any: _lowercase =GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated _lowercase =c.n_embd + 1 # int _lowercase =c.resid_pdrop + 1.0 # float _lowercase =not c.scale_attn_weights # bool _lowercase =c.summary_type + '''foo''' # str c.update_from_string( f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}" ) self.assertEqual(UpperCAmelCase , c.n_embd , '''mismatch for key: n_embd''' ) self.assertEqual(UpperCAmelCase , c.resid_pdrop , '''mismatch for key: resid_pdrop''' ) self.assertEqual(UpperCAmelCase , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' ) self.assertEqual(UpperCAmelCase , c.summary_type , '''mismatch for key: summary_type''' ) def __A (self ) -> Union[str, Any]: _lowercase =PretrainedConfig() _lowercase =[key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. self.assertListEqual( UpperCAmelCase , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] ) _lowercase =[key for key, value in config_common_kwargs.items() if value == getattr(UpperCAmelCase , UpperCAmelCase )] if len(UpperCAmelCase ) > 0: raise ValueError( '''The following keys are set with the default values in''' ''' `test_configuration_common.config_common_kwargs` pick another value for them:''' f" {', '.join(UpperCAmelCase )}." ) def __A (self ) -> Optional[int]: with self.assertRaises(UpperCAmelCase ): # config is in subfolder, the following should not work without specifying the subfolder _lowercase =BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' ) _lowercase =BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' ) self.assertIsNotNone(UpperCAmelCase ) def __A (self ) -> List[str]: # A mock response for an HTTP head request to emulate server down _lowercase =mock.Mock() _lowercase =5_0_0 _lowercase ={} _lowercase =HTTPError _lowercase ={} # Download this model to make sure it's in the cache. _lowercase =BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=UpperCAmelCase ) as mock_head: _lowercase =BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # This check we did call the fake head request mock_head.assert_called() def __A (self ) -> Union[str, Any]: # This test is for deprecated behavior and can be removed in v5 _lowercase =BertConfig.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' ) def __A (self ) -> Any: _lowercase =AutoConfig.from_pretrained('''bert-base-cased''' ) _lowercase =['''config.4.0.0.json'''] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(UpperCAmelCase ) _lowercase =2 json.dump(configuration.to_dict() , open(os.path.join(UpperCAmelCase , '''config.4.0.0.json''' ) , '''w''' ) ) # This should pick the new configuration file as the version of Transformers is > 4.0.0 _lowercase =AutoConfig.from_pretrained(UpperCAmelCase ) self.assertEqual(new_configuration.hidden_size , 2 ) # Will need to be adjusted if we reach v42 and this test is still here. # Should pick the old configuration file as the version of Transformers is < 4.42.0 _lowercase =['''config.42.0.0.json'''] _lowercase =7_6_8 configuration.save_pretrained(UpperCAmelCase ) shutil.move(os.path.join(UpperCAmelCase , '''config.4.0.0.json''' ) , os.path.join(UpperCAmelCase , '''config.42.0.0.json''' ) ) _lowercase =AutoConfig.from_pretrained(UpperCAmelCase ) self.assertEqual(new_configuration.hidden_size , 7_6_8 ) def __A (self ) -> List[Any]: # This repo has two configuration files, one for v4.0.0 and above with a different hidden size. _lowercase ='''hf-internal-testing/test-two-configs''' import transformers as new_transformers _lowercase ='''v4.0.0''' _lowercase , _lowercase =new_transformers.models.auto.AutoConfig.from_pretrained( UpperCAmelCase , return_unused_kwargs=UpperCAmelCase ) self.assertEqual(new_configuration.hidden_size , 2 ) # This checks `_configuration_file` ia not kept in the kwargs by mistake. self.assertDictEqual(UpperCAmelCase , {} ) # Testing an older version by monkey-patching the version in the module it's used. import transformers as old_transformers _lowercase ='''v3.0.0''' _lowercase =old_transformers.models.auto.AutoConfig.from_pretrained(UpperCAmelCase ) self.assertEqual(old_configuration.hidden_size , 7_6_8 )
5
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A : Tuple = logging.get_logger(__name__) __A : Optional[Any] = { "MIT/ast-finetuned-audioset-10-10-0.4593": ( "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json" ), } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """audio-spectrogram-transformer""" def __init__( self : int , __UpperCamelCase : Optional[Any]=7_6_8 , __UpperCamelCase : int=1_2 , __UpperCamelCase : List[Any]=1_2 , __UpperCamelCase : List[Any]=3_0_7_2 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : Union[str, Any]=0.0 , __UpperCamelCase : Dict=0.0 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Union[str, Any]=1e-12 , __UpperCamelCase : Optional[Any]=1_6 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : int=1_0 , __UpperCamelCase : Optional[int]=1_0 , __UpperCamelCase : str=1_0_2_4 , __UpperCamelCase : Optional[Any]=1_2_8 , **__UpperCamelCase : Any , )->Tuple: super().__init__(**__UpperCamelCase ) _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = patch_size _UpperCAmelCase = qkv_bias _UpperCAmelCase = frequency_stride _UpperCAmelCase = time_stride _UpperCAmelCase = max_length _UpperCAmelCase = num_mel_bins
260
0
from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = (1 - _cos) / 2 __a = 1 - _cos __a = 1 + alpha __a = -2 * _cos __a = 1 - alpha __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = (1 + _cos) / 2 __a = -1 - _cos __a = 1 + alpha __a = -2 * _cos __a = 1 - alpha __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = _sin / 2 __a = 0 __a = -ba __a = 1 + alpha __a = -2 * _cos __a = 1 - alpha __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = 1 - alpha __a = -2 * _cos __a = 1 + alpha __a = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ , a__ = 1 / sqrt(2 ) , ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = 10 ** (gain_db / 40) __a = 1 + alpha * big_a __a = -2 * _cos __a = 1 - alpha * big_a __a = 1 + alpha / big_a __a = -2 * _cos __a = 1 - alpha / big_a __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ , a__ = 1 / sqrt(2 ) , ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = 10 ** (gain_db / 40) __a = (big_a + 1) - (big_a - 1) * _cos __a = (big_a + 1) + (big_a - 1) * _cos __a = (big_a - 1) - (big_a + 1) * _cos __a = (big_a - 1) + (big_a + 1) * _cos __a = 2 * sqrt(a__ ) * alpha __a = big_a * (pmc + aaa) __a = 2 * big_a * mpc __a = big_a * (pmc - aaa) __a = ppmc + aaa __a = -2 * pmpc __a = ppmc - aaa __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ , a__ = 1 / sqrt(2 ) , ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = 10 ** (gain_db / 40) __a = (big_a + 1) - (big_a - 1) * _cos __a = (big_a + 1) + (big_a - 1) * _cos __a = (big_a - 1) - (big_a + 1) * _cos __a = (big_a - 1) + (big_a + 1) * _cos __a = 2 * sqrt(a__ ) * alpha __a = big_a * (ppmc + aaa) __a = -2 * big_a * pmpc __a = big_a * (ppmc - aaa) __a = pmc + aaa __a = 2 * mpc __a = pmc - aaa __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
6
"""simple docstring""" def lowercase ( ): '''simple docstring''' _UpperCAmelCase = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] _UpperCAmelCase = 6 _UpperCAmelCase = 1 _UpperCAmelCase = 1901 _UpperCAmelCase = 0 while year < 2001: day += 7 if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0): if day > days_per_month[month - 1] and month != 2: month += 1 _UpperCAmelCase = day - days_per_month[month - 2] elif day > 29 and month == 2: month += 1 _UpperCAmelCase = day - 29 else: if day > days_per_month[month - 1]: month += 1 _UpperCAmelCase = day - days_per_month[month - 2] if month > 12: year += 1 _UpperCAmelCase = 1 if year < 2001 and day == 1: sundays += 1 return sundays if __name__ == "__main__": print(solution())
260
0
import unittest from transformers import load_tool from transformers.utils import is_torch_available if is_torch_available(): import torch from transformers.testing_utils import require_torch from .test_tools_common import ToolTesterMixin @require_torch class A ( unittest.TestCase , _UpperCAmelCase ): """simple docstring""" def snake_case__ ( self : str )-> Union[str, Any]: '''simple docstring''' A__ = load_tool('text-to-speech' ) self.tool.setup() def snake_case__ ( self : int )-> Optional[Any]: '''simple docstring''' torch.manual_seed(0 ) A__ = self.tool('hey' ) A__ = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3],torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ),) ) def snake_case__ ( self : int )-> Union[str, Any]: '''simple docstring''' torch.manual_seed(0 ) A__ = self.tool('hey' ) A__ = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3],torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ),) )
7
"""simple docstring""" from __future__ import annotations import math def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = str(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [n] for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ): list_nums.append(int(str_num[i:] ) ) list_nums.append(int(str_num[:-i] ) ) return list_nums def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if len(str(_SCREAMING_SNAKE_CASE ) ) > 3: if not is_prime(int(str(_SCREAMING_SNAKE_CASE )[-3:] ) ) or not is_prime(int(str(_SCREAMING_SNAKE_CASE )[:3] ) ): return False return True def lowercase ( _SCREAMING_SNAKE_CASE : int = 11 ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = 13 while len(_SCREAMING_SNAKE_CASE ) != count: if validate(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = list_truncated_nums(_SCREAMING_SNAKE_CASE ) if all(is_prime(_SCREAMING_SNAKE_CASE ) for i in list_nums ): list_truncated_primes.append(_SCREAMING_SNAKE_CASE ) num += 2 return list_truncated_primes def lowercase ( ): '''simple docstring''' return sum(compute_truncated_primes(11 ) ) if __name__ == "__main__": print(f'''{sum(compute_truncated_primes(11)) = }''')
260
0
from ..utils import is_flax_available, is_torch_available if is_torch_available(): from .autoencoder_kl import AutoencoderKL from .controlnet import ControlNetModel from .dual_transformer_ad import DualTransformeraDModel from .modeling_utils import ModelMixin from .prior_transformer import PriorTransformer from .ta_film_transformer import TaFilmDecoder from .transformer_ad import TransformeraDModel from .unet_ad import UNetaDModel from .unet_ad import UNetaDModel from .unet_ad_condition import UNetaDConditionModel from .unet_ad_condition import UNetaDConditionModel from .vq_model import VQModel if is_flax_available(): from .controlnet_flax import FlaxControlNetModel from .unet_ad_condition_flax import FlaxUNetaDConditionModel from .vae_flax import FlaxAutoencoderKL
8
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __A : str = sys.version_info >= (3, 10) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Tuple=None ): '''simple docstring''' return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = field(default="""toto""" , metadata={"""help""": """help message"""}) @dataclass class _a : """simple docstring""" UpperCamelCase__ = False UpperCamelCase__ = True UpperCamelCase__ = None class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """titi""" UpperCamelCase__ = """toto""" class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """titi""" UpperCamelCase__ = """toto""" UpperCamelCase__ = 42 @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" def lowercase__ ( self : Tuple )->Optional[int]: _UpperCAmelCase = BasicEnum(self.foo ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" def lowercase__ ( self : List[str] )->List[Any]: _UpperCAmelCase = MixedTypeEnum(self.foo ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = None UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """help message"""}) UpperCamelCase__ = None UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[]) @dataclass class _a : """simple docstring""" UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[1, 2, 3]) UpperCamelCase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) UpperCamelCase__ = list_field(default=[0.1, 0.2, 0.3]) @dataclass class _a : """simple docstring""" UpperCamelCase__ = field() UpperCamelCase__ = field() UpperCamelCase__ = field() def lowercase__ ( self : int )->str: _UpperCAmelCase = BasicEnum(self.required_enum ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = field() UpperCamelCase__ = None UpperCamelCase__ = field(default="""toto""" , metadata={"""help""": """help message"""}) UpperCamelCase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) if is_python_no_less_than_3_10: @dataclass class _a : """simple docstring""" UpperCamelCase__ = False UpperCamelCase__ = True UpperCamelCase__ = None @dataclass class _a : """simple docstring""" UpperCamelCase__ = None UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """help message"""}) UpperCamelCase__ = None UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[]) class _a ( unittest.TestCase): """simple docstring""" def lowercase__ ( self : int , __UpperCamelCase : argparse.ArgumentParser , __UpperCamelCase : argparse.ArgumentParser )->Dict: self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): _UpperCAmelCase = {k: v for k, v in vars(__UpperCamelCase ).items() if k != '''container'''} _UpperCAmelCase = {k: v for k, v in vars(__UpperCamelCase ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , __UpperCamelCase ) and yy.get('''choices''' , __UpperCamelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](__UpperCamelCase ) , yy['''type'''](__UpperCamelCase ) ) del xx["type"], yy["type"] self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : int )->str: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--bar''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--baz''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--flag''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((_UpperCAmelCase) , ) = parser.parse_args_into_dataclasses(__UpperCamelCase , look_for_args_file=__UpperCamelCase ) self.assertFalse(example.flag ) def lowercase__ ( self : Dict )->List[Any]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=4_2 , type=__UpperCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCamelCase , help='''help message''' ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Tuple )->List[str]: _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) expected.add_argument('''--baz''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=__UpperCamelCase , dest='''baz''' ) expected.add_argument('''--opt''' , type=__UpperCamelCase , default=__UpperCamelCase ) _UpperCAmelCase = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(__UpperCamelCase ) for dataclass_type in dataclass_types: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) def lowercase__ ( self : Optional[Any] )->str: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 4_2] , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) _UpperCAmelCase = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) _UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 4_2 ) _UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowercase__ ( self : List[str] )->List[str]: @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 4_2) , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 4_2 ) def lowercase__ ( self : int )->int: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=__UpperCamelCase ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCamelCase ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual( __UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) _UpperCAmelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(__UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def lowercase__ ( self : Union[str, Any] )->Tuple: _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=__UpperCamelCase , type=__UpperCamelCase ) expected.add_argument('''--bar''' , default=__UpperCamelCase , type=__UpperCamelCase , help='''help message''' ) expected.add_argument('''--baz''' , default=__UpperCamelCase , type=__UpperCamelCase ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) _UpperCAmelCase = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(__UpperCamelCase ) for dataclass_type in dataclass_types: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , bar=__UpperCamelCase , baz=__UpperCamelCase , ces=[] , des=[] ) ) _UpperCAmelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(__UpperCamelCase , Namespace(foo=1_2 , bar=3.1_4 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def lowercase__ ( self : Any )->int: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--required_str''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCamelCase , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : str )->List[Any]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCamelCase , ) expected.add_argument('''--opt''' , type=__UpperCamelCase , default=__UpperCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCamelCase , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Optional[Any] )->Optional[int]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } _UpperCAmelCase = parser.parse_dict(__UpperCamelCase )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->List[str]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, '''extra''': 4_2, } self.assertRaises(__UpperCamelCase , parser.parse_dict , __UpperCamelCase , allow_extra_keys=__UpperCamelCase ) def lowercase__ ( self : Optional[Any] )->Optional[int]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(__UpperCamelCase , '''temp_json''' ) os.mkdir(__UpperCamelCase ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->Any: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(__UpperCamelCase , '''temp_yaml''' ) os.mkdir(__UpperCamelCase ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : int )->List[str]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase )
260
0
import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration __lowerCAmelCase : Optional[Any] =5_0_0_0_0_0 __lowerCAmelCase ,__lowerCAmelCase : List[Any] =os.path.split(__file__) __lowerCAmelCase : Tuple =os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json')) @get_duration def _UpperCamelCase ( lowercase__ , **lowercase__ ): __SCREAMING_SNAKE_CASE : List[str] = dataset.map(**lowercase__ ) @get_duration def _UpperCamelCase ( lowercase__ , **lowercase__ ): __SCREAMING_SNAKE_CASE : Optional[int] = dataset.filter(**lowercase__ ) def _UpperCamelCase ( ): __SCREAMING_SNAKE_CASE : Tuple = {'''num examples''': SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: __SCREAMING_SNAKE_CASE : Tuple = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} ) __SCREAMING_SNAKE_CASE : Dict = generate_example_dataset( os.path.join(lowercase__ , '''dataset.arrow''' ) , lowercase__ , num_examples=lowercase__ ) __SCREAMING_SNAKE_CASE : Any = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=lowercase__ ) def tokenize(lowercase__ ): return tokenizer(examples['''text'''] ) __SCREAMING_SNAKE_CASE : List[str] = map(lowercase__ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = map(lowercase__ , batched=lowercase__ ) __SCREAMING_SNAKE_CASE : str = map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ ) with dataset.formatted_as(type='''numpy''' ): __SCREAMING_SNAKE_CASE : Tuple = map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ ) with dataset.formatted_as(type='''pandas''' ): __SCREAMING_SNAKE_CASE : Union[str, Any] = map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ ) with dataset.formatted_as(type='''torch''' , columns='''numbers''' ): __SCREAMING_SNAKE_CASE : Union[str, Any] = map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ ) with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ): __SCREAMING_SNAKE_CASE : Tuple = map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ ) __SCREAMING_SNAKE_CASE : Tuple = map(lowercase__ , function=lowercase__ , batched=lowercase__ ) __SCREAMING_SNAKE_CASE : int = filter(lowercase__ ) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(lowercase__ , '''wb''' ) as f: f.write(json.dumps(lowercase__ ).encode('''utf-8''' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
9
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] _UpperCAmelCase = True for i in range(_SCREAMING_SNAKE_CASE ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: _UpperCAmelCase = True if a[i].islower(): _UpperCAmelCase = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
260
0
from __future__ import annotations def lowerCAmelCase_ ( __a ) -> bool: """simple docstring""" return len(set(__a ) ) == len(__a ) if __name__ == "__main__": import doctest doctest.testmod()
10
"""simple docstring""" import random def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' _UpperCAmelCase = a[left_index] _UpperCAmelCase = left_index + 1 for j in range(left_index + 1 , _SCREAMING_SNAKE_CASE ): if a[j] < pivot: _UpperCAmelCase , _UpperCAmelCase = a[i], a[j] i += 1 _UpperCAmelCase , _UpperCAmelCase = a[i - 1], a[left_index] return i - 1 def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' if left < right: _UpperCAmelCase = random.randint(_SCREAMING_SNAKE_CASE , right - 1 ) _UpperCAmelCase , _UpperCAmelCase = ( a[left], a[pivot], ) # switches the pivot with the left most bound _UpperCAmelCase = partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) quick_sort_random( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # recursive quicksort to the left of the pivot point quick_sort_random( _SCREAMING_SNAKE_CASE , pivot_index + 1 , _SCREAMING_SNAKE_CASE ) # recursive quicksort to the right of the pivot point def lowercase ( ): '''simple docstring''' _UpperCAmelCase = input('''Enter numbers separated by a comma:\n''' ).strip() _UpperCAmelCase = [int(_SCREAMING_SNAKE_CASE ) for item in user_input.split(''',''' )] quick_sort_random(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) ) print(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
260
0
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = ["image_processor", "tokenizer"] __SCREAMING_SNAKE_CASE = "OwlViTImageProcessor" __SCREAMING_SNAKE_CASE = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase) -> Union[str, Any]: _A : int = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __lowerCamelCase , ) _A : List[Any] = kwargs.pop("feature_extractor") _A : Dict = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(__lowerCamelCase , __lowerCamelCase) def __call__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="max_length" , __lowerCamelCase="np" , **__lowerCamelCase) -> Any: if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none.") if text is not None: if isinstance(__lowerCamelCase , __lowerCamelCase) or (isinstance(__lowerCamelCase , __lowerCamelCase) and not isinstance(text[0] , __lowerCamelCase)): _A : Union[str, Any] = [self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)] elif isinstance(__lowerCamelCase , __lowerCamelCase) and isinstance(text[0] , __lowerCamelCase): _A : Optional[Any] = [] # Maximum number of queries across batch _A : str = max([len(__lowerCamelCase) for t in text]) # Pad all batch samples to max number of text queries for t in text: if len(__lowerCamelCase) != max_num_queries: _A : Optional[int] = t + [" "] * (max_num_queries - len(__lowerCamelCase)) _A : List[Any] = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) encodings.append(__lowerCamelCase) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings") if return_tensors == "np": _A : Tuple = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0) _A : Optional[Any] = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp _A : Optional[int] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0) _A : Optional[int] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0) elif return_tensors == "pt" and is_torch_available(): import torch _A : Optional[Any] = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0) _A : Union[str, Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf _A : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0) _A : Tuple = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0) else: raise ValueError("Target return tensor type could not be returned") _A : Optional[Any] = BatchEncoding() _A : Tuple = input_ids _A : Dict = attention_mask if query_images is not None: _A : Optional[Any] = BatchEncoding() _A : List[str] = self.image_processor( __lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase).pixel_values _A : Union[str, Any] = query_pixel_values if images is not None: _A : int = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) if text is not None and images is not None: _A : Tuple = image_features.pixel_values return encoding elif query_images is not None and images is not None: _A : int = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__lowerCamelCase) , tensor_type=__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> str: return self.image_processor.post_process(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> List[str]: return self.image_processor.post_process_object_detection(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]: return self.image_processor.post_process_image_guided_detection(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> int: return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]: return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase) @property def _lowerCamelCase ( self) -> int: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowerCamelCase , ) return self.image_processor_class @property def _lowerCamelCase ( self) -> List[str]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowerCamelCase , ) return self.image_processor
11
"""simple docstring""" import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging __A : Union[str, Any] = "\\n\n" __A : Any = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n" __A : List[str] = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _a ( datasets.Metric): """simple docstring""" def lowercase__ ( self : List[Any] )->Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''input_texts''': datasets.Value('''string''' ), } ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , ) def lowercase__ ( self : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : int = 1_6 , __UpperCamelCase : bool = True , __UpperCamelCase : List[Any]=None )->Any: if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." if device == "gpu": _UpperCAmelCase = '''cuda''' else: _UpperCAmelCase = '''cuda''' if torch.cuda.is_available() else '''cpu''' _UpperCAmelCase = AutoModelForCausalLM.from_pretrained(__UpperCamelCase ) _UpperCAmelCase = model.to(__UpperCamelCase ) _UpperCAmelCase = AutoTokenizer.from_pretrained(__UpperCamelCase ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: _UpperCAmelCase = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(__UpperCamelCase ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" _UpperCAmelCase = model.config.max_length - 1 else: _UpperCAmelCase = model.config.max_length _UpperCAmelCase = tokenizer( __UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors='''pt''' , return_attention_mask=__UpperCamelCase , ).to(__UpperCamelCase ) _UpperCAmelCase = encodings['''input_ids'''] _UpperCAmelCase = encodings['''attention_mask'''] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." _UpperCAmelCase = [] _UpperCAmelCase = CrossEntropyLoss(reduction='''none''' ) for start_index in logging.tqdm(range(0 , len(__UpperCamelCase ) , __UpperCamelCase ) ): _UpperCAmelCase = min(start_index + batch_size , len(__UpperCamelCase ) ) _UpperCAmelCase = encoded_texts[start_index:end_index] _UpperCAmelCase = attn_masks[start_index:end_index] if add_start_token: _UpperCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__UpperCamelCase ) _UpperCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) _UpperCAmelCase = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(__UpperCamelCase ), attn_mask] , dim=1 ) _UpperCAmelCase = encoded_batch with torch.no_grad(): _UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase ).logits _UpperCAmelCase = out_logits[..., :-1, :].contiguous() _UpperCAmelCase = labels[..., 1:].contiguous() _UpperCAmelCase = attn_mask[..., 1:].contiguous() _UpperCAmelCase = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , __UpperCamelCase ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(__UpperCamelCase )}
260
0
import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer UpperCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : Tuple = 'AutoTokenizer' UpperCAmelCase__ : Union[str, Any] = ['tokenizer'] UpperCAmelCase__ : Optional[Any] = { 'semantic_prompt': 1, 'coarse_prompt': 2, 'fine_prompt': 2, } def __init__( self: str , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any]=None ): super().__init__(UpperCamelCase_ ) __lowerCamelCase = speaker_embeddings @classmethod def lowerCAmelCase__ ( cls: List[str] , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple="speaker_embeddings_path.json" , **UpperCamelCase_: Dict ): if speaker_embeddings_dict_path is not None: __lowerCamelCase = get_file_from_repo( UpperCamelCase_ , UpperCamelCase_ , subfolder=kwargs.pop("""subfolder""" , UpperCamelCase_ ) , cache_dir=kwargs.pop("""cache_dir""" , UpperCamelCase_ ) , force_download=kwargs.pop("""force_download""" , UpperCamelCase_ ) , proxies=kwargs.pop("""proxies""" , UpperCamelCase_ ) , resume_download=kwargs.pop("""resume_download""" , UpperCamelCase_ ) , local_files_only=kwargs.pop("""local_files_only""" , UpperCamelCase_ ) , use_auth_token=kwargs.pop("""use_auth_token""" , UpperCamelCase_ ) , revision=kwargs.pop("""revision""" , UpperCamelCase_ ) , ) if speaker_embeddings_path is None: logger.warning( F'`{os.path.join(UpperCamelCase_ , UpperCamelCase_ )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' ) __lowerCamelCase = None else: with open(UpperCamelCase_ ) as speaker_embeddings_json: __lowerCamelCase = json.load(UpperCamelCase_ ) else: __lowerCamelCase = None __lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) return cls(tokenizer=UpperCamelCase_ , speaker_embeddings=UpperCamelCase_ ) def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: int , UpperCamelCase_: int="speaker_embeddings_path.json" , UpperCamelCase_: Optional[Any]="speaker_embeddings" , UpperCamelCase_: bool = False , **UpperCamelCase_: Union[str, Any] , ): if self.speaker_embeddings is not None: os.makedirs(os.path.join(UpperCamelCase_ , UpperCamelCase_ , """v2""" ) , exist_ok=UpperCamelCase_ ) __lowerCamelCase = {} __lowerCamelCase = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": __lowerCamelCase = self._load_voice_preset(UpperCamelCase_ ) __lowerCamelCase = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict["""repo_or_path"""] , UpperCamelCase_ , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=UpperCamelCase_ , ) __lowerCamelCase = os.path.join(UpperCamelCase_ , F'{prompt_key}_{key}.npy' ) __lowerCamelCase = tmp_dict with open(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , """w""" ) as fp: json.dump(UpperCamelCase_ , UpperCamelCase_ ) super().save_pretrained(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str = None , **UpperCamelCase_: List[Any] ): __lowerCamelCase = self.speaker_embeddings[voice_preset] __lowerCamelCase = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' ) __lowerCamelCase = get_file_from_repo( self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , UpperCamelCase_ ) , cache_dir=kwargs.pop("""cache_dir""" , UpperCamelCase_ ) , force_download=kwargs.pop("""force_download""" , UpperCamelCase_ ) , proxies=kwargs.pop("""proxies""" , UpperCamelCase_ ) , resume_download=kwargs.pop("""resume_download""" , UpperCamelCase_ ) , local_files_only=kwargs.pop("""local_files_only""" , UpperCamelCase_ ) , use_auth_token=kwargs.pop("""use_auth_token""" , UpperCamelCase_ ) , revision=kwargs.pop("""revision""" , UpperCamelCase_ ) , ) if path is None: raise ValueError( F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' ) __lowerCamelCase = np.load(UpperCamelCase_ ) return voice_preset_dict def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Optional[dict] = None ): for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' ) def __call__( self: Any , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: Optional[Any]=None , UpperCamelCase_: List[Any]="pt" , UpperCamelCase_: str=2_56 , UpperCamelCase_: List[Any]=False , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Optional[Any]=False , **UpperCamelCase_: Dict , ): if voice_preset is not None and not isinstance(UpperCamelCase_ , UpperCamelCase_ ): if ( isinstance(UpperCamelCase_ , UpperCamelCase_ ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): __lowerCamelCase = self._load_voice_preset(UpperCamelCase_ ) else: if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and not voice_preset.endswith(""".npz""" ): __lowerCamelCase = voice_preset + """.npz""" __lowerCamelCase = np.load(UpperCamelCase_ ) if voice_preset is not None: self._validate_voice_preset_dict(UpperCamelCase_ , **UpperCamelCase_ ) __lowerCamelCase = BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ ) __lowerCamelCase = self.tokenizer( UpperCamelCase_ , return_tensors=UpperCamelCase_ , padding="""max_length""" , max_length=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , ) if voice_preset is not None: __lowerCamelCase = voice_preset return encoded_text
12
"""simple docstring""" import pytest import datasets # Import fixture modules as plugins __A : int = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"] def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' for item in items: if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ): continue item.add_marker(pytest.mark.unit ) def lowercase ( _SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' _UpperCAmelCase = tmp_path_factory.getbasetemp() / '''cache''' _UpperCAmelCase = test_hf_cache_home / '''datasets''' _UpperCAmelCase = test_hf_cache_home / '''metrics''' _UpperCAmelCase = test_hf_cache_home / '''modules''' monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = test_hf_datasets_cache / '''downloads''' monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = test_hf_datasets_cache / '''downloads''' / '''extracted''' monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_SCREAMING_SNAKE_CASE ) ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE , scope='''session''' ) def lowercase ( ): '''simple docstring''' datasets.disable_progress_bar() @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , _SCREAMING_SNAKE_CASE ) @pytest.fixture def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , _SCREAMING_SNAKE_CASE )
260
0
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __lowercase ( UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" _UpperCAmelCase : Dict = KandinskyImgaImgPipeline _UpperCAmelCase : Dict = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image'''] _UpperCAmelCase : Union[str, Any] = [ '''prompt''', '''negative_prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', ] _UpperCAmelCase : Dict = [ '''generator''', '''height''', '''width''', '''strength''', '''guidance_scale''', '''negative_prompt''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] _UpperCAmelCase : Tuple = False @property def _SCREAMING_SNAKE_CASE ( self : Dict): return 32 @property def _SCREAMING_SNAKE_CASE ( self : int): return 32 @property def _SCREAMING_SNAKE_CASE ( self : Optional[int]): return self.time_input_dim @property def _SCREAMING_SNAKE_CASE ( self : Any): return self.time_input_dim * 4 @property def _SCREAMING_SNAKE_CASE ( self : str): return 100 @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): SCREAMING_SNAKE_CASE_: Any = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base") return tokenizer @property def _SCREAMING_SNAKE_CASE ( self : Tuple): torch.manual_seed(0) SCREAMING_SNAKE_CASE_: int = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) SCREAMING_SNAKE_CASE_: Optional[int] = MultilingualCLIP(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = text_encoder.eval() return text_encoder @property def _SCREAMING_SNAKE_CASE ( self : str): torch.manual_seed(0) SCREAMING_SNAKE_CASE_: List[str] = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "text_image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "text_image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } SCREAMING_SNAKE_CASE_: Union[str, Any] = UNetaDConditionModel(**lowerCAmelCase__) return model @property def _SCREAMING_SNAKE_CASE ( self : int): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def _SCREAMING_SNAKE_CASE ( self : Any): torch.manual_seed(0) SCREAMING_SNAKE_CASE_: Any = VQModel(**self.dummy_movq_kwargs) return model def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): SCREAMING_SNAKE_CASE_: Optional[int] = self.dummy_text_encoder SCREAMING_SNAKE_CASE_: List[Any] = self.dummy_tokenizer SCREAMING_SNAKE_CASE_: Optional[int] = self.dummy_unet SCREAMING_SNAKE_CASE_: Any = self.dummy_movq SCREAMING_SNAKE_CASE_: str = { "num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.0_0085, "beta_end": 0.012, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } SCREAMING_SNAKE_CASE_: str = DDIMScheduler(**lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "movq": movq, } return components def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict=0): SCREAMING_SNAKE_CASE_: Optional[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1)).to(lowerCAmelCase__) # create init_image SCREAMING_SNAKE_CASE_: Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = image.cpu().permute(0 , 2 , 3 , 1)[0] SCREAMING_SNAKE_CASE_: Any = Image.fromarray(np.uinta(lowerCAmelCase__)).convert("RGB").resize((256, 256)) if str(lowerCAmelCase__).startswith("mps"): SCREAMING_SNAKE_CASE_: List[str] = torch.manual_seed(lowerCAmelCase__) else: SCREAMING_SNAKE_CASE_: List[Any] = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = { "prompt": "horse", "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def _SCREAMING_SNAKE_CASE ( self : Tuple): SCREAMING_SNAKE_CASE_: Optional[int] = "cpu" SCREAMING_SNAKE_CASE_: Any = self.get_dummy_components() SCREAMING_SNAKE_CASE_: Optional[Any] = self.pipeline_class(**lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = pipe(**self.get_dummy_inputs(lowerCAmelCase__)) SCREAMING_SNAKE_CASE_: List[str] = output.images SCREAMING_SNAKE_CASE_: Any = pipe( **self.get_dummy_inputs(lowerCAmelCase__) , return_dict=lowerCAmelCase__ , )[0] SCREAMING_SNAKE_CASE_: int = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE_: Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE_: str = np.array( [0.6147_4943, 0.607_3539, 0.4330_8544, 0.592_8269, 0.4749_3595, 0.4675_5973, 0.461_3838, 0.4536_8797, 0.5011_9233]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2 ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class __lowercase ( unittest.TestCase ): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : int): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : List[Any]): SCREAMING_SNAKE_CASE_: Optional[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/kandinsky_img2img_frog.npy") SCREAMING_SNAKE_CASE_: Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png") SCREAMING_SNAKE_CASE_: str = "A red cartoon frog, 4k" SCREAMING_SNAKE_CASE_: int = KandinskyPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa) pipe_prior.to(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Any = KandinskyImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1" , torch_dtype=torch.floataa) SCREAMING_SNAKE_CASE_: int = pipeline.to(lowerCAmelCase__) pipeline.set_progress_bar_config(disable=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.Generator(device="cpu").manual_seed(0) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = pipe_prior( lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() SCREAMING_SNAKE_CASE_: Any = pipeline( lowerCAmelCase__ , image=lowerCAmelCase__ , image_embeds=lowerCAmelCase__ , negative_image_embeds=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , ) SCREAMING_SNAKE_CASE_: str = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__)
13
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : list ): '''simple docstring''' if len(_SCREAMING_SNAKE_CASE ) <= 1: return lst _UpperCAmelCase = 1 while i < len(_SCREAMING_SNAKE_CASE ): if lst[i - 1] <= lst[i]: i += 1 else: _UpperCAmelCase , _UpperCAmelCase = lst[i], lst[i - 1] i -= 1 if i == 0: _UpperCAmelCase = 1 return lst if __name__ == "__main__": __A : Dict = input("Enter numbers separated by a comma:\n").strip() __A : List[Any] = [int(item) for item in user_input.split(",")] print(gnome_sort(unsorted))
260
0
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCamelCase : Optional[Any] = logging.get_logger(__name__) _lowerCamelCase : List[str] = """▁""" _lowerCamelCase : List[Any] = {"""vocab_file""": """spiece.model"""} _lowerCamelCase : List[str] = { """vocab_file""": { """google/reformer-crime-and-punishment""": ( """https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model""" ) } } _lowerCamelCase : Optional[int] = { """google/reformer-crime-and-punishment""": 524288, } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str]="</s>" , UpperCAmelCase__ : Optional[Any]="<unk>" , UpperCAmelCase__ : List[str]=[] , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : Dict , ) ->None: '''simple docstring''' A__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , ) A__ = vocab_file A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(UpperCAmelCase__) @property def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]: '''simple docstring''' return self.sp_model.get_piece_size() def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict[str, int]: '''simple docstring''' A__ = {self.convert_ids_to_tokens(UpperCAmelCase__): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : Any) ->Dict: '''simple docstring''' A__ = self.__dict__.copy() A__ = None return state def __setstate__( self : int , UpperCAmelCase__ : List[str]) ->Dict: '''simple docstring''' A__ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs'''): A__ = {} A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : str) ->List[str]: '''simple docstring''' return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Any) ->int: '''simple docstring''' return self.sp_model.piece_to_id(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : List[str]) ->Tuple: '''simple docstring''' if index < self.sp_model.get_piece_size(): A__ = self.sp_model.IdToPiece(UpperCAmelCase__) return token def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Dict) ->Tuple: '''simple docstring''' A__ = [] A__ = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(UpperCAmelCase__) + token A__ = [] else: current_sub_tokens.append(UpperCAmelCase__) out_string += self.sp_model.decode(UpperCAmelCase__) return out_string.strip() def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None) ->Tuple[str]: '''simple docstring''' if not os.path.isdir(UpperCAmelCase__): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""") return A__ = os.path.join( UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase__) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , UpperCAmelCase__) elif not os.path.isfile(self.vocab_file): with open(UpperCAmelCase__ , '''wb''') as fi: A__ = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase__) return (out_vocab_file,)
14
"""simple docstring""" import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version __A : int = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt") def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : int = 1_6000 ): '''simple docstring''' _UpperCAmelCase = int(round(sample_rate * max_length ) ) if len(_SCREAMING_SNAKE_CASE ) <= sample_length: return wav _UpperCAmelCase = randint(0 , len(_SCREAMING_SNAKE_CASE ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class _a : """simple docstring""" UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """Name of a dataset from the datasets package"""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """A file containing the training audio paths and labels."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """A file containing the validation audio paths and labels."""}) UpperCamelCase__ = field( default="""train""" , metadata={ """help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'""" } , ) UpperCamelCase__ = field( default="""validation""" , metadata={ """help""": ( """The name of the training data set split to use (via the datasets library). Defaults to 'validation'""" ) } , ) UpperCamelCase__ = field( default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , ) UpperCamelCase__ = field( default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. Defaults to 'label'"""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) UpperCamelCase__ = field( default=20 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = field( default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""}) UpperCamelCase__ = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Name or path of preprocessor config."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , ) def lowercase__ ( self : Optional[Any] )->int: if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( '''The argument `--freeze_feature_extractor` is deprecated and ''' '''will be removed in a future version. Use `--freeze_feature_encoder`''' '''instead. Setting `freeze_feature_encoder==True`.''' , __UpperCamelCase , ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( '''The argument `--freeze_feature_extractor` is deprecated and ''' '''should not be used in combination with `--freeze_feature_encoder`.''' '''Only make use of `--freeze_feature_encoder`.''' ) def lowercase ( ): '''simple docstring''' _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_audio_classification''' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(_SCREAMING_SNAKE_CASE ) transformers.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} ' + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. _UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. ' '''Use --overwrite_output_dir to train from scratch.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset and prepare it for the audio classification task. _UpperCAmelCase = DatasetDict() _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( f'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. ' '''Make sure to set `--audio_column_name` to the correct audio column - one of ''' f'{", ".join(raw_datasets["train"].column_names )}.' ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( f'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. ' '''Make sure to set `--label_column_name` to the correct text column - one of ''' f'{", ".join(raw_datasets["train"].column_names )}.' ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy _UpperCAmelCase = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. _UpperCAmelCase = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) _UpperCAmelCase = feature_extractor.model_input_names[0] def train_transforms(_SCREAMING_SNAKE_CASE : Tuple ): _UpperCAmelCase = [] for audio in batch[data_args.audio_column_name]: _UpperCAmelCase = random_subsample( audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=feature_extractor.sampling_rate ) _UpperCAmelCase = {model_input_name: inputs.get(_SCREAMING_SNAKE_CASE )} _UpperCAmelCase = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(_SCREAMING_SNAKE_CASE : Optional[int] ): _UpperCAmelCase = [audio['''array'''] for audio in batch[data_args.audio_column_name]] _UpperCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=feature_extractor.sampling_rate ) _UpperCAmelCase = {model_input_name: inputs.get(_SCREAMING_SNAKE_CASE )} _UpperCAmelCase = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. _UpperCAmelCase = raw_datasets['''train'''].features[data_args.label_column_name].names _UpperCAmelCase , _UpperCAmelCase = {}, {} for i, label in enumerate(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = str(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = label # Load the accuracy metric from the datasets package _UpperCAmelCase = evaluate.load('''accuracy''' ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. def compute_metrics(_SCREAMING_SNAKE_CASE : List[str] ): _UpperCAmelCase = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=_SCREAMING_SNAKE_CASE , references=eval_pred.label_ids ) _UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(_SCREAMING_SNAKE_CASE ) , labelaid=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _UpperCAmelCase = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: _UpperCAmelCase = ( raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(_SCREAMING_SNAKE_CASE , output_all_columns=_SCREAMING_SNAKE_CASE ) if training_args.do_eval: if data_args.max_eval_samples is not None: _UpperCAmelCase = ( raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(_SCREAMING_SNAKE_CASE , output_all_columns=_SCREAMING_SNAKE_CASE ) # Initialize our trainer _UpperCAmelCase = Trainer( model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , ) # Training if training_args.do_train: _UpperCAmelCase = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase = last_checkpoint _UpperCAmelCase = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _UpperCAmelCase = trainer.evaluate() trainer.log_metrics('''eval''' , _SCREAMING_SNAKE_CASE ) trainer.save_metrics('''eval''' , _SCREAMING_SNAKE_CASE ) # Write model card and (optionally) push to hub _UpperCAmelCase = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''audio-classification''', '''dataset''': data_args.dataset_name, '''tags''': ['''audio-classification'''], } if training_args.push_to_hub: trainer.push_to_hub(**_SCREAMING_SNAKE_CASE ) else: trainer.create_model_card(**_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
260
0
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__) @dataclass class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = [ "no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process", ] def __init__( self : Any ,**A : int ): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: __A = deprecated_arg[3:] __A = not kwargs.pop(A ) logger.warning( f'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or''' f''' {positive_arg}={kwargs[positive_arg]}''' ) __A = kwargs.pop("tpu_name" ,self.tpu_name ) __A = kwargs.pop("device_idx" ,self.device_idx ) __A = kwargs.pop("eager_mode" ,self.eager_mode ) __A = kwargs.pop("use_xla" ,self.use_xla ) super().__init__(**A ) snake_case_ = field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Name of TPU"} , ) snake_case_ = field( default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , ) snake_case_ = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Benchmark models in eager model."} ) snake_case_ = field( default=__SCREAMING_SNAKE_CASE , metadata={ "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`." } , ) @cached_property def UpperCamelCase_ ( self : Dict ): requires_backends(self ,["tf"] ) __A = None if self.tpu: try: if self.tpu_name: __A = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: __A = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: __A = None return tpu @cached_property def UpperCamelCase_ ( self : Tuple ): requires_backends(self ,["tf"] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) __A = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] ,"GPU" ) __A = tf.distribute.OneDeviceStrategy(device=f'''/gpu:{self.device_idx}''' ) else: tf.config.set_visible_devices([] ,"GPU" ) # disable GPU __A = tf.distribute.OneDeviceStrategy(device=f'''/cpu:{self.device_idx}''' ) return strategy @property def UpperCamelCase_ ( self : List[Any] ): requires_backends(self ,["tf"] ) return self._setup_tpu is not None @property def UpperCamelCase_ ( self : Any ): requires_backends(self ,["tf"] ) return self._setup_strategy @property def UpperCamelCase_ ( self : int ): requires_backends(self ,["tf"] ) return tf.config.list_physical_devices("GPU" ) @property def UpperCamelCase_ ( self : Tuple ): requires_backends(self ,["tf"] ) if self.cuda: return len(self.gpu_list ) return 0 @property def UpperCamelCase_ ( self : Union[str, Any] ): return self.n_gpu > 0
15
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = (DPMSolverSinglestepScheduler,) UpperCamelCase__ = (("""num_inference_steps""", 25),) def lowercase__ ( self : Tuple , **__UpperCamelCase : Tuple )->Any: _UpperCAmelCase = { '''num_train_timesteps''': 1_0_0_0, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''prediction_type''': '''epsilon''', '''thresholding''': False, '''sample_max_value''': 1.0, '''algorithm_type''': '''dpmsolver++''', '''solver_type''': '''midpoint''', '''lambda_min_clipped''': -float('''inf''' ), '''variance_type''': None, } config.update(**__UpperCamelCase ) return config def lowercase__ ( self : Dict , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : Optional[int] )->Tuple: _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase , _UpperCAmelCase = sample, sample for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ): _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase__ ( self : Any )->Union[str, Any]: pass def lowercase__ ( self : str , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : List[Any] )->Dict: _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config() _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residual (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase__ ( self : int , __UpperCamelCase : List[str]=None , **__UpperCamelCase : Optional[int] )->List[Any]: if scheduler is None: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 1_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample return sample def lowercase__ ( self : List[Any] )->Dict: _UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = 5_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_5_7_4 ) < 1e-3 def lowercase__ ( self : Dict )->Dict: for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__UpperCamelCase ) def lowercase__ ( self : str )->Optional[Any]: # make sure that iterating over schedulers with same config names gives same results # for defaults _UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 _UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 def lowercase__ ( self : Union[str, Any] )->int: self.check_over_configs(thresholding=__UpperCamelCase ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , algorithm_type='''dpmsolver++''' , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , ) def lowercase__ ( self : str )->str: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCamelCase ) def lowercase__ ( self : List[Any] )->Tuple: for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , ) _UpperCAmelCase = self.full_loop( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , ) assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers" def lowercase__ ( self : Dict )->List[str]: self.check_over_configs(lower_order_final=__UpperCamelCase ) self.check_over_configs(lower_order_final=__UpperCamelCase ) def lowercase__ ( self : Dict )->str: self.check_over_configs(lambda_min_clipped=-float('''inf''' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def lowercase__ ( self : List[str] )->int: self.check_over_configs(variance_type=__UpperCamelCase ) self.check_over_configs(variance_type='''learned_range''' ) def lowercase__ ( self : List[str] )->Union[str, Any]: for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 ) def lowercase__ ( self : List[Any] )->int: _UpperCAmelCase = self.full_loop() _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 def lowercase__ ( self : List[str] )->List[str]: _UpperCAmelCase = self.full_loop(use_karras_sigmas=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_2_4_8 ) < 1e-3 def lowercase__ ( self : int )->List[Any]: _UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.1_4_5_3 ) < 1e-3 def lowercase__ ( self : Optional[Any] )->Dict: _UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.0_6_4_9 ) < 1e-3 def lowercase__ ( self : Union[str, Any] )->List[str]: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 1_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample assert sample.dtype == torch.floataa
260
0
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class __A ( unittest.TestCase ): '''simple docstring''' lowerCAmelCase : List[str] = ViTImageProcessor if is_vision_available() else None @property def UpperCAmelCase ( self : str ) -> Union[str, Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase ( self : List[str] ) -> List[str]: """simple docstring""" lowercase__ : int = (3, 32, 128) lowercase__ : List[str] = tempfile.mkdtemp() # fmt: off lowercase__ : Any = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z'''] # fmt: on lowercase__ : Tuple = dict(zip(_snake_case ,range(len(_snake_case ) ) ) ) lowercase__ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp: fp.write(json.dumps(_snake_case ) + '''\n''' ) lowercase__ : Optional[Any] = { '''do_normalize''': False, '''do_resize''': True, '''image_processor_type''': '''ViTImageProcessor''', '''resample''': 3, '''size''': {'''height''': 32, '''width''': 128}, } lowercase__ : Optional[int] = os.path.join(self.tmpdirname ,_snake_case ) with open(self.image_processor_file ,'''w''' ,encoding='''utf-8''' ) as fp: json.dump(_snake_case ,_snake_case ) def UpperCAmelCase ( self : Any ,**_snake_case : Dict ) -> List[str]: """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname ,**_snake_case ) def UpperCAmelCase ( self : Any ,**_snake_case : List[Any] ) -> Dict: """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname ,**_snake_case ) def UpperCAmelCase ( self : Tuple ) -> Any: """simple docstring""" shutil.rmtree(self.tmpdirname ) def UpperCAmelCase ( self : int ) -> List[str]: """simple docstring""" lowercase__ : Optional[Any] = np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta ) lowercase__ : Any = Image.fromarray(np.moveaxis(_snake_case ,0 ,-1 ) ) return image_input def UpperCAmelCase ( self : Optional[int] ) -> int: """simple docstring""" lowercase__ : List[str] = self.get_tokenizer() lowercase__ : Union[str, Any] = self.get_image_processor() lowercase__ : int = MgpstrProcessor(tokenizer=_snake_case ,image_processor=_snake_case ) processor.save_pretrained(self.tmpdirname ) lowercase__ : Dict = MgpstrProcessor.from_pretrained(self.tmpdirname ,use_fast=_snake_case ) self.assertEqual(processor.char_tokenizer.get_vocab() ,tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer ,_snake_case ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor ,_snake_case ) def UpperCAmelCase ( self : int ) -> Dict: """simple docstring""" lowercase__ : Dict = self.get_tokenizer() lowercase__ : Dict = self.get_image_processor() lowercase__ : List[str] = MgpstrProcessor(tokenizer=_snake_case ,image_processor=_snake_case ) processor.save_pretrained(self.tmpdirname ) lowercase__ : List[Any] = self.get_tokenizer(bos_token='''(BOS)''' ,eos_token='''(EOS)''' ) lowercase__ : List[str] = self.get_image_processor(do_normalize=_snake_case ,padding_value=1.0 ) lowercase__ : Dict = MgpstrProcessor.from_pretrained( self.tmpdirname ,bos_token='''(BOS)''' ,eos_token='''(EOS)''' ,do_normalize=_snake_case ,padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer ,_snake_case ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,_snake_case ) def UpperCAmelCase ( self : Dict ) -> Dict: """simple docstring""" lowercase__ : List[Any] = self.get_image_processor() lowercase__ : Dict = self.get_tokenizer() lowercase__ : List[Any] = MgpstrProcessor(tokenizer=_snake_case ,image_processor=_snake_case ) lowercase__ : Tuple = self.prepare_image_inputs() lowercase__ : Any = image_processor(_snake_case ,return_tensors='''np''' ) lowercase__ : str = processor(images=_snake_case ,return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def UpperCAmelCase ( self : List[str] ) -> Any: """simple docstring""" lowercase__ : List[str] = self.get_image_processor() lowercase__ : Any = self.get_tokenizer() lowercase__ : str = MgpstrProcessor(tokenizer=_snake_case ,image_processor=_snake_case ) lowercase__ : Tuple = '''test''' lowercase__ : str = processor(text=_snake_case ) lowercase__ : List[Any] = tokenizer(_snake_case ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def UpperCAmelCase ( self : int ) -> Any: """simple docstring""" lowercase__ : Any = self.get_image_processor() lowercase__ : Optional[int] = self.get_tokenizer() lowercase__ : Union[str, Any] = MgpstrProcessor(tokenizer=_snake_case ,image_processor=_snake_case ) lowercase__ : Union[str, Any] = '''test''' lowercase__ : List[Any] = self.prepare_image_inputs() lowercase__ : Tuple = processor(text=_snake_case ,images=_snake_case ) self.assertListEqual(list(inputs.keys() ) ,['''pixel_values''', '''labels'''] ) # test if it raises when no input is passed with pytest.raises(_snake_case ): processor() def UpperCAmelCase ( self : Tuple ) -> str: """simple docstring""" lowercase__ : Optional[int] = self.get_image_processor() lowercase__ : Dict = self.get_tokenizer() lowercase__ : Union[str, Any] = MgpstrProcessor(tokenizer=_snake_case ,image_processor=_snake_case ) lowercase__ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] lowercase__ : List[Any] = processor.char_decode(_snake_case ) lowercase__ : Dict = tokenizer.batch_decode(_snake_case ) lowercase__ : Optional[int] = [seq.replace(''' ''' ,'''''' ) for seq in decoded_tok] self.assertListEqual(_snake_case ,_snake_case ) def UpperCAmelCase ( self : Dict ) -> List[str]: """simple docstring""" lowercase__ : int = self.get_image_processor() lowercase__ : Union[str, Any] = self.get_tokenizer() lowercase__ : int = MgpstrProcessor(tokenizer=_snake_case ,image_processor=_snake_case ) lowercase__ : List[Any] = None lowercase__ : List[Any] = self.prepare_image_inputs() lowercase__ : Tuple = processor(text=_snake_case ,images=_snake_case ) self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names ) def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" lowercase__ : Optional[int] = self.get_image_processor() lowercase__ : Tuple = self.get_tokenizer() lowercase__ : Any = MgpstrProcessor(tokenizer=_snake_case ,image_processor=_snake_case ) lowercase__ : Tuple = torch.randn(1 ,27 ,38 ) lowercase__ : int = torch.randn(1 ,27 ,50_257 ) lowercase__ : List[Any] = torch.randn(1 ,27 ,30_522 ) lowercase__ : Any = processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) ,['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
16
"""simple docstring""" from __future__ import annotations from math import pi from typing import Protocol import matplotlib.pyplot as plt import numpy as np class _a ( lowerCAmelCase): """simple docstring""" def lowercase__ ( self : List[Any] , __UpperCamelCase : float )->float: return 0.0 def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] ) _UpperCAmelCase = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] ) return lowest, highest def lowercase ( _SCREAMING_SNAKE_CASE : FilterType , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = 512 _UpperCAmelCase = [1] + [0] * (size - 1) _UpperCAmelCase = [filter_type.process(_SCREAMING_SNAKE_CASE ) for item in inputs] _UpperCAmelCase = [0] * (samplerate - size) # zero-padding outputs += filler _UpperCAmelCase = np.abs(np.fft.fft(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = 20 * np.logaa(_SCREAMING_SNAKE_CASE ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) # Display within reasonable bounds _UpperCAmelCase = get_bounds(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) ) plt.ylabel('''Gain (dB)''' ) plt.plot(_SCREAMING_SNAKE_CASE ) plt.show() def lowercase ( _SCREAMING_SNAKE_CASE : FilterType , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = 512 _UpperCAmelCase = [1] + [0] * (size - 1) _UpperCAmelCase = [filter_type.process(_SCREAMING_SNAKE_CASE ) for item in inputs] _UpperCAmelCase = [0] * (samplerate - size) # zero-padding outputs += filler _UpperCAmelCase = np.angle(np.fft.fft(_SCREAMING_SNAKE_CASE ) ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) plt.ylim(-2 * pi , 2 * pi ) plt.ylabel('''Phase shift (Radians)''' ) plt.plot(np.unwrap(_SCREAMING_SNAKE_CASE , -2 * pi ) ) plt.show()
260
0
"""simple docstring""" from __future__ import annotations def _A ( UpperCamelCase_ : list[int]) -> int: '''simple docstring''' if not nums: return 0 __lowercase = nums[0] __lowercase = 0 for num in nums[1:]: __lowercase ,__lowercase = ( max_excluding + num, max(UpperCamelCase_, UpperCamelCase_), ) return max(UpperCamelCase_, UpperCamelCase_) if __name__ == "__main__": import doctest doctest.testmod()
17
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A : Union[str, Any] = logging.get_logger(__name__) __A : Dict = { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json", "umberto-commoncrawl-cased-v1": ( "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json" ), "umberto-wikipedia-uncased-v1": ( "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json" ), } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """camembert""" def __init__( self : List[str] , __UpperCamelCase : Union[str, Any]=3_0_5_2_2 , __UpperCamelCase : Optional[Any]=7_6_8 , __UpperCamelCase : Optional[int]=1_2 , __UpperCamelCase : Union[str, Any]=1_2 , __UpperCamelCase : List[Any]=3_0_7_2 , __UpperCamelCase : Dict="gelu" , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : int=0.1 , __UpperCamelCase : int=5_1_2 , __UpperCamelCase : Dict=2 , __UpperCamelCase : int=0.0_2 , __UpperCamelCase : int=1e-12 , __UpperCamelCase : Optional[Any]=1 , __UpperCamelCase : Dict=0 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : Any="absolute" , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : str=None , **__UpperCamelCase : Optional[Any] , )->str: super().__init__(pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = position_embedding_type _UpperCAmelCase = use_cache _UpperCAmelCase = classifier_dropout class _a ( lowerCAmelCase): """simple docstring""" @property def lowercase__ ( self : int )->Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _UpperCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
260
0
def _snake_case ( lowerCAmelCase : int ): """simple docstring""" return 1 if digit in (0, 1) else (digit * factorial(digit - 1 )) def _snake_case ( lowerCAmelCase : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = 0 SCREAMING_SNAKE_CASE_ : Tuple = number while duplicate > 0: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = divmod(lowerCAmelCase , 1_0 ) fact_sum += factorial(lowerCAmelCase ) return fact_sum == number if __name__ == "__main__": print('''Program to check whether a number is a Krisnamurthy Number or not.''') __lowerCamelCase : Tuple = int(input('''Enter number: ''').strip()) print( f'''{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.''' )
18
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A : Tuple = logging.get_logger(__name__) __A : List[str] = { "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """poolformer""" def __init__( self : List[str] , __UpperCamelCase : int=3 , __UpperCamelCase : List[Any]=1_6 , __UpperCamelCase : str=1_6 , __UpperCamelCase : List[Any]=3 , __UpperCamelCase : int=4.0 , __UpperCamelCase : str=[2, 2, 6, 2] , __UpperCamelCase : Tuple=[6_4, 1_2_8, 3_2_0, 5_1_2] , __UpperCamelCase : int=[7, 3, 3, 3] , __UpperCamelCase : str=[4, 2, 2, 2] , __UpperCamelCase : Union[str, Any]=[2, 1, 1, 1] , __UpperCamelCase : List[str]=4 , __UpperCamelCase : List[str]=0.0 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : List[str]=True , __UpperCamelCase : Union[str, Any]=1e-5 , __UpperCamelCase : str=0.0_2 , **__UpperCamelCase : List[Any] , )->Dict: _UpperCAmelCase = num_channels _UpperCAmelCase = patch_size _UpperCAmelCase = stride _UpperCAmelCase = padding _UpperCAmelCase = pool_size _UpperCAmelCase = hidden_sizes _UpperCAmelCase = mlp_ratio _UpperCAmelCase = depths _UpperCAmelCase = patch_sizes _UpperCAmelCase = strides _UpperCAmelCase = num_encoder_blocks _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_layer_scale _UpperCAmelCase = layer_scale_init_value _UpperCAmelCase = initializer_range super().__init__(**__UpperCamelCase ) class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = version.parse("""1.11""") @property def lowercase__ ( self : Union[str, Any] )->Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowercase__ ( self : Tuple )->float: return 2e-3
260
0
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __A =logging.get_logger(__name__) def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False ): lowerCamelCase_ = "backbone." if is_semantic else "" lowerCamelCase_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'{prefix}blocks.{i}.norm1.weight', F'beit.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((F'{prefix}blocks.{i}.norm1.bias', F'beit.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append( (F'{prefix}blocks.{i}.attn.proj.weight', F'beit.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append( (F'{prefix}blocks.{i}.attn.proj.bias', F'beit.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((F'{prefix}blocks.{i}.norm2.weight', F'beit.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((F'{prefix}blocks.{i}.norm2.bias', F'beit.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append((F'{prefix}blocks.{i}.mlp.fc1.weight', F'beit.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((F'{prefix}blocks.{i}.mlp.fc1.bias', F'beit.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((F'{prefix}blocks.{i}.mlp.fc2.weight', F'beit.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((F'{prefix}blocks.{i}.mlp.fc2.bias', F'beit.encoder.layer.{i}.output.dense.bias') ) # projection layer + position embeddings rename_keys.extend( [ (F'{prefix}cls_token', "beit.embeddings.cls_token"), (F'{prefix}patch_embed.proj.weight', "beit.embeddings.patch_embeddings.projection.weight"), (F'{prefix}patch_embed.proj.bias', "beit.embeddings.patch_embeddings.projection.bias"), (F'{prefix}pos_embed', "beit.embeddings.position_embeddings"), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ("mask_token", "beit.embeddings.mask_token"), ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ] ) else: # layernorm + classification head rename_keys.extend( [ ("fc_norm.weight", "beit.pooler.layernorm.weight"), ("fc_norm.bias", "beit.pooler.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False ): for i in range(config.num_hidden_layers ): lowerCamelCase_ = "backbone." if is_semantic else "" # queries, keys and values lowerCamelCase_ = state_dict.pop(F'{prefix}blocks.{i}.attn.qkv.weight' ) lowerCamelCase_ = state_dict.pop(F'{prefix}blocks.{i}.attn.q_bias' ) lowerCamelCase_ = state_dict.pop(F'{prefix}blocks.{i}.attn.v_bias' ) lowerCamelCase_ = in_proj_weight[ : config.hidden_size, : ] lowerCamelCase_ = q_bias lowerCamelCase_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase_ = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase_ = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained lowerCamelCase_ = state_dict.pop(F'{prefix}blocks.{i}.gamma_1' ) lowerCamelCase_ = state_dict.pop(F'{prefix}blocks.{i}.gamma_2' ) lowerCamelCase_ = gamma_a lowerCamelCase_ = gamma_a def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): lowerCamelCase_ = dct.pop(lowerCamelCase__ ) lowerCamelCase_ = val def lowerCamelCase_ ( ): lowerCamelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg" lowerCamelCase_ = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ) return im @torch.no_grad() def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ): lowerCamelCase_ = False if "rvlcdip" in checkpoint_url else True lowerCamelCase_ = BeitConfig(use_absolute_position_embeddings=lowerCamelCase__ , use_mask_token=lowerCamelCase__ ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: lowerCamelCase_ = 1_0_2_4 lowerCamelCase_ = 4_0_9_6 lowerCamelCase_ = 2_4 lowerCamelCase_ = 1_6 # labels if "rvlcdip" in checkpoint_url: lowerCamelCase_ = 1_6 lowerCamelCase_ = "huggingface/label-files" lowerCamelCase_ = "rvlcdip-id2label.json" lowerCamelCase_ = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) ) lowerCamelCase_ = {int(lowerCamelCase__ ): v for k, v in idalabel.items()} lowerCamelCase_ = idalabel lowerCamelCase_ = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys lowerCamelCase_ = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="cpu" )["model"] lowerCamelCase_ = create_rename_keys(lowerCamelCase__ , has_lm_head=lowerCamelCase__ ) for src, dest in rename_keys: rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ , has_lm_head=lowerCamelCase__ ) # load HuggingFace model lowerCamelCase_ = BeitForMaskedImageModeling(lowerCamelCase__ ) if has_lm_head else BeitForImageClassification(lowerCamelCase__ ) model.eval() model.load_state_dict(lowerCamelCase__ ) # Check outputs on an image lowerCamelCase_ = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase__ ) lowerCamelCase_ = prepare_img() lowerCamelCase_ = image_processor(images=lowerCamelCase__ , return_tensors="pt" ) lowerCamelCase_ = encoding["pixel_values"] lowerCamelCase_ = model(lowerCamelCase__ ) lowerCamelCase_ = outputs.logits # verify logits lowerCamelCase_ = [1, 1_6] if "rvlcdip" in checkpoint_url else [1, 1_9_6, 8_1_9_2] assert logits.shape == torch.Size(lowerCamelCase__ ), "Shape of logits not as expected" Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) print(F'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCamelCase__ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(lowerCamelCase__ ) if push_to_hub: if has_lm_head: lowerCamelCase_ = "dit-base" if "base" in checkpoint_url else "dit-large" else: lowerCamelCase_ = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip" image_processor.push_to_hub( repo_path_or_name=Path(lowerCamelCase__ , lowerCamelCase__ ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=lowerCamelCase__ , ) model.push_to_hub( repo_path_or_name=Path(lowerCamelCase__ , lowerCamelCase__ ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=lowerCamelCase__ , ) if __name__ == "__main__": __A =argparse.ArgumentParser() parser.add_argument( '''--checkpoint_url''', default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''', type=str, help='''URL to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', ) __A =parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
19
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __A : Union[str, Any] = 16 __A : Optional[Any] = 32 def lowercase ( _SCREAMING_SNAKE_CASE : Accelerator , _SCREAMING_SNAKE_CASE : int = 16 ): '''simple docstring''' _UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' ) _UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(_SCREAMING_SNAKE_CASE : Optional[int] ): # max_length=None => use the model max length (it's actually the default) _UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _UpperCAmelCase = datasets.map( _SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(_SCREAMING_SNAKE_CASE : List[str] ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCAmelCase = 16 elif accelerator.mixed_precision != "no": _UpperCAmelCase = 8 else: _UpperCAmelCase = None return tokenizer.pad( _SCREAMING_SNAKE_CASE , padding='''longest''' , max_length=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' , ) # Instantiate dataloaders. _UpperCAmelCase = DataLoader( tokenized_datasets['''train'''] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = DataLoader( tokenized_datasets['''validation'''] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders __A : Optional[int] = mocked_dataloaders # noqa: F811 def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , _SCREAMING_SNAKE_CASE ) == "1": _UpperCAmelCase = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: _UpperCAmelCase = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir ) else: _UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCAmelCase = config['''lr'''] _UpperCAmelCase = int(config['''num_epochs'''] ) _UpperCAmelCase = int(config['''seed'''] ) _UpperCAmelCase = int(config['''batch_size'''] ) set_seed(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase , _UpperCAmelCase = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation _UpperCAmelCase = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE _UpperCAmelCase = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=_SCREAMING_SNAKE_CASE ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCAmelCase = model.to(accelerator.device ) # Instantiate optimizer _UpperCAmelCase = AdamW(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE ) # Instantiate scheduler _UpperCAmelCase = get_linear_schedule_with_warmup( optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(_SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # New Code # # We need to initialize the trackers we use. Overall configurations can also be stored if args.with_tracking: _UpperCAmelCase = os.path.split(_SCREAMING_SNAKE_CASE )[-1].split('''.''' )[0] accelerator.init_trackers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Now we train the model for epoch in range(_SCREAMING_SNAKE_CASE ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: _UpperCAmelCase = 0 for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() _UpperCAmelCase = loss / gradient_accumulation_steps accelerator.backward(_SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.logits.argmax(dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'epoch {epoch}:' , _SCREAMING_SNAKE_CASE ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { '''accuracy''': eval_metric['''accuracy'''], '''f1''': eval_metric['''f1'''], '''train_loss''': total_loss.item() / len(_SCREAMING_SNAKE_CASE ), '''epoch''': epoch, } , step=_SCREAMING_SNAKE_CASE , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def lowercase ( ): '''simple docstring''' _UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) parser.add_argument( '''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , ) parser.add_argument( '''--project_dir''' , type=_SCREAMING_SNAKE_CASE , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
260
0
import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase : int = logging.get_logger(__name__) lowercase : Optional[Any] = {"""vocab_file""": """vocab.json"""} lowercase : Optional[Any] = { """vocab_file""": { """mgp-str""": """https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json""", } } lowercase : int = {"""mgp-str""": 27} class __snake_case ( lowerCAmelCase ): _a : List[Any]= VOCAB_FILES_NAMES _a : Optional[int]= PRETRAINED_VOCAB_FILES_MAP _a : str= PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self ,snake_case ,snake_case="[GO]" ,snake_case="[GO]" ,snake_case="[s]" ,snake_case="[GO]" ,**snake_case ): '''simple docstring''' super().__init__( unk_token=snake_case ,bos_token=snake_case ,eos_token=snake_case ,pad_token=snake_case ,**snake_case ,) with open(snake_case ,encoding="""utf-8""" ) as vocab_handle: lowercase : Dict = json.load(snake_case ) lowercase : Dict = {v: k for k, v in self.vocab.items()} @property def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' return len(self.vocab ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' return dict(self.vocab ,**self.added_tokens_encoder ) def _SCREAMING_SNAKE_CASE ( self ,snake_case ): '''simple docstring''' lowercase : Dict = [] for s in text: char_tokens.extend(snake_case ) return char_tokens def _SCREAMING_SNAKE_CASE ( self ,snake_case ): '''simple docstring''' return self.vocab.get(snake_case ,self.vocab.get(self.unk_token ) ) def _SCREAMING_SNAKE_CASE ( self ,snake_case ): '''simple docstring''' return self.decoder.get(snake_case ) def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = None ): '''simple docstring''' if not os.path.isdir(snake_case ): logger.error("""Vocabulary path ({}) should be a directory""".format(snake_case ) ) return lowercase : Dict = os.path.join( snake_case ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) with open(snake_case ,"""w""" ,encoding="""utf-8""" ) as f: f.write(json.dumps(self.vocab ,indent=2 ,sort_keys=snake_case ,ensure_ascii=snake_case ) + """\n""" ) return (vocab_file,)
20
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : set ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ), len(grid[0] ) if ( min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) _UpperCAmelCase = 0 count += depth_first_search(_SCREAMING_SNAKE_CASE , row + 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , row - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col + 1 , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col - 1 , _SCREAMING_SNAKE_CASE ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
260
0
from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass SCREAMING_SNAKE_CASE : Tuple = (3, 9, -11, 0, 7, 5, 1, -1) SCREAMING_SNAKE_CASE : Union[str, Any] = (4, 6, 2, 0, 8, 10, 3, -2) @dataclass class _lowerCamelCase: lowercase_ : int lowercase_ : Node | None class _lowerCamelCase: def __init__( self, lowerCamelCase) -> None: """simple docstring""" _lowercase : Node | None = None for i in sorted(lowerCamelCase, reverse=lowerCamelCase): _lowercase : Tuple = Node(lowerCamelCase, self.head) def __iter__( self) -> Iterator[int]: """simple docstring""" _lowercase : Union[str, Any] = self.head while node: yield node.data _lowercase : int = node.next_node def __len__( self) -> int: """simple docstring""" return sum(1 for _ in self) def __str__( self) -> str: """simple docstring""" return " -> ".join([str(lowerCamelCase) for node in self]) def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> SortedLinkedList: return SortedLinkedList(list(lowerCamelCase_ ) + list(lowerCamelCase_ ) ) if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE : int = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
21
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() ) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' _UpperCAmelCase = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue _UpperCAmelCase = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' ) _UpperCAmelCase = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' ) _UpperCAmelCase = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' ) _UpperCAmelCase = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' ) _UpperCAmelCase = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' ) _UpperCAmelCase = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' ) _UpperCAmelCase = key.replace('''image_encoder.module''' , '''flava.image_model''' ) _UpperCAmelCase = key.replace('''text_encoder.module''' , '''flava.text_model''' ) _UpperCAmelCase = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' ) _UpperCAmelCase = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' ) _UpperCAmelCase = key.replace('''text_projection''' , '''flava.text_projection''' ) _UpperCAmelCase = key.replace('''image_projection''' , '''flava.image_projection''' ) _UpperCAmelCase = value.float() for key, value in codebook_state_dict.items(): _UpperCAmelCase = value return upgrade @torch.no_grad() def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int]=None ): '''simple docstring''' if config_path is not None: _UpperCAmelCase = FlavaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = FlavaConfig() _UpperCAmelCase = FlavaForPreTraining(_SCREAMING_SNAKE_CASE ).eval() _UpperCAmelCase = convert_dalle_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , save_checkpoint=_SCREAMING_SNAKE_CASE ) if os.path.exists(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' ) else: _UpperCAmelCase = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='''cpu''' ) _UpperCAmelCase = upgrade_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) hf_model.load_state_dict(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = hf_model.state_dict() _UpperCAmelCase = count_parameters(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = count_parameters(_SCREAMING_SNAKE_CASE ) + count_parameters(_SCREAMING_SNAKE_CASE ) assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A : Dict = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") __A : Optional[Any] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
260
0
'''simple docstring''' import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class A_ ( lowerCAmelCase_ , unittest.TestCase ): _lowerCamelCase : str = CanineTokenizer _lowerCamelCase : Tuple = False def lowercase ( self : List[Any] ): super().setUp() _UpperCAmelCase = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowercase ( self : List[str] ): return CanineTokenizer.from_pretrained("google/canine-s" ) def lowercase ( self : Union[str, Any] , **snake_case_ : List[Any] ): _UpperCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case_ ) _UpperCAmelCase = 1_0_2_4 return tokenizer @require_torch def lowercase ( self : List[str] ): _UpperCAmelCase = self.canine_tokenizer _UpperCAmelCase = ["Life is like a box of chocolates.", "You never know what you're gonna get."] # fmt: off _UpperCAmelCase = [5_7_3_4_4, 7_6, 1_0_5, 1_0_2, 1_0_1, 3_2, 1_0_5, 1_1_5, 3_2, 1_0_8, 1_0_5, 1_0_7, 1_0_1, 3_2, 9_7, 3_2, 9_8, 1_1_1, 1_2_0, 3_2, 1_1_1, 1_0_2, 3_2, 9_9, 1_0_4, 1_1_1, 9_9, 1_1_1, 1_0_8, 9_7, 1_1_6, 1_0_1, 1_1_5, 4_6, 5_7_3_4_5, 0, 0, 0, 0] # fmt: on _UpperCAmelCase = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="pt" ) self.assertIsInstance(snake_case_ , snake_case_ ) _UpperCAmelCase = list(batch.input_ids.numpy()[0] ) self.assertListEqual(snake_case_ , snake_case_ ) self.assertEqual((2, 3_9) , batch.input_ids.shape ) self.assertEqual((2, 3_9) , batch.attention_mask.shape ) @require_torch def lowercase ( self : List[Any] ): _UpperCAmelCase = self.canine_tokenizer _UpperCAmelCase = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."] _UpperCAmelCase = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="pt" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("input_ids" , snake_case_ ) self.assertIn("attention_mask" , snake_case_ ) self.assertIn("token_type_ids" , snake_case_ ) @require_torch def lowercase ( self : Optional[int] ): _UpperCAmelCase = self.canine_tokenizer _UpperCAmelCase = [ "What's the weater?", "It's about 25 degrees.", ] _UpperCAmelCase = tokenizer( text_target=snake_case_ , max_length=3_2 , padding="max_length" , truncation=snake_case_ , return_tensors="pt" ) self.assertEqual(3_2 , targets["input_ids"].shape[1] ) def lowercase ( self : Union[str, Any] ): # safety check on max_len default value so we are sure the test works _UpperCAmelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): self.assertNotEqual(tokenizer.model_max_length , 4_2 ) # Now let's start the test _UpperCAmelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc _UpperCAmelCase = tempfile.mkdtemp() _UpperCAmelCase = " He is very happy, UNwant\u00E9d,running" _UpperCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) tokenizer.save_pretrained(snake_case_ ) _UpperCAmelCase = tokenizer.__class__.from_pretrained(snake_case_ ) _UpperCAmelCase = after_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) shutil.rmtree(snake_case_ ) _UpperCAmelCase = self.get_tokenizers(model_max_length=4_2 ) for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc _UpperCAmelCase = tempfile.mkdtemp() _UpperCAmelCase = " He is very happy, UNwant\u00E9d,running" _UpperCAmelCase = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: _UpperCAmelCase = chr(0Xe0_07 ) additional_special_tokens.append(snake_case_ ) tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} ) _UpperCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) tokenizer.save_pretrained(snake_case_ ) _UpperCAmelCase = tokenizer.__class__.from_pretrained(snake_case_ ) _UpperCAmelCase = after_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) self.assertIn(snake_case_ , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 4_2 ) _UpperCAmelCase = tokenizer.__class__.from_pretrained(snake_case_ , model_max_length=4_3 ) self.assertEqual(tokenizer.model_max_length , 4_3 ) shutil.rmtree(snake_case_ ) def lowercase ( self : int ): _UpperCAmelCase = self.get_tokenizers(do_lower_case=snake_case_ ) for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): _UpperCAmelCase , _UpperCAmelCase = self.get_clean_sequence(snake_case_ ) # a special token for Canine can be defined as follows: _UpperCAmelCase = 0Xe0_05 _UpperCAmelCase = chr(snake_case_ ) tokenizer.add_special_tokens({"cls_token": special_token} ) _UpperCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) self.assertEqual(len(snake_case_ ) , 1 ) _UpperCAmelCase = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=snake_case_ ) _UpperCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) _UpperCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) _UpperCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) self.assertEqual(snake_case_ , input_encoded + special_token_id ) _UpperCAmelCase = tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ ) self.assertTrue(special_token not in decoded ) def lowercase ( self : int ): _UpperCAmelCase = self.get_tokenizers(do_lower_case=snake_case_ ) for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): _UpperCAmelCase = chr(0Xe0_05 ) _UpperCAmelCase = chr(0Xe0_06 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=snake_case_ ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py) tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} ) _UpperCAmelCase = tokenizer.tokenize(snake_case_ ) _UpperCAmelCase = tokenizer.tokenize(snake_case_ ) self.assertEqual(len(snake_case_ ) , 1 ) self.assertEqual(len(snake_case_ ) , 1 ) self.assertEqual(token_a[0] , snake_case_ ) self.assertEqual(token_a[0] , snake_case_ ) @require_tokenizers def lowercase ( self : Any ): _UpperCAmelCase = self.get_tokenizers(do_lower_case=snake_case_ ) for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): # a special token for Canine can be defined as follows: _UpperCAmelCase = 0Xe0_06 _UpperCAmelCase = chr(snake_case_ ) _UpperCAmelCase = AddedToken(snake_case_ , lstrip=snake_case_ ) tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(snake_case_ ) tokenizer.from_pretrained(snake_case_ ) def lowercase ( self : List[Any] ): _UpperCAmelCase = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(snake_case_ ) with open(os.path.join(snake_case_ , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file: _UpperCAmelCase = json.load(snake_case_ ) with open(os.path.join(snake_case_ , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file: _UpperCAmelCase = json.load(snake_case_ ) # a special token for Canine can be defined as follows: _UpperCAmelCase = 0Xe0_06 _UpperCAmelCase = chr(snake_case_ ) _UpperCAmelCase = [new_token_a] _UpperCAmelCase = [new_token_a] with open(os.path.join(snake_case_ , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(snake_case_ , snake_case_ ) with open(os.path.join(snake_case_ , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(snake_case_ , snake_case_ ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _UpperCAmelCase = tokenizer_class.from_pretrained(snake_case_ , extra_ids=0 ) self.assertIn(snake_case_ , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) _UpperCAmelCase = 0Xe0_07 _UpperCAmelCase = chr(snake_case_ ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _UpperCAmelCase = [AddedToken(snake_case_ , lstrip=snake_case_ )] _UpperCAmelCase = tokenizer_class.from_pretrained( snake_case_ , additional_special_tokens=snake_case_ , extra_ids=0 ) self.assertIn(snake_case_ , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def lowercase ( self : Tuple ): _UpperCAmelCase = self.get_tokenizers(do_lower_case=snake_case_ ) for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): _UpperCAmelCase = "hello world" if self.space_between_special_tokens: _UpperCAmelCase = "[CLS] hello world [SEP]" else: _UpperCAmelCase = input _UpperCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) _UpperCAmelCase = tokenizer.decode(snake_case_ , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(snake_case_ , [output, output.lower()] ) def lowercase ( self : str ): _UpperCAmelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): _UpperCAmelCase = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", ] _UpperCAmelCase = "a" _UpperCAmelCase = ord(snake_case_ ) for attr in attributes_list: setattr(snake_case_ , attr + "_id" , snake_case_ ) self.assertEqual(getattr(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(getattr(snake_case_ , attr + "_id" ) , snake_case_ ) setattr(snake_case_ , attr + "_id" , snake_case_ ) self.assertEqual(getattr(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(getattr(snake_case_ , attr + "_id" ) , snake_case_ ) setattr(snake_case_ , "additional_special_tokens_ids" , [] ) self.assertListEqual(getattr(snake_case_ , "additional_special_tokens" ) , [] ) self.assertListEqual(getattr(snake_case_ , "additional_special_tokens_ids" ) , [] ) _UpperCAmelCase = 0Xe0_06 _UpperCAmelCase = chr(snake_case_ ) setattr(snake_case_ , "additional_special_tokens_ids" , [additional_special_token_id] ) self.assertListEqual(getattr(snake_case_ , "additional_special_tokens" ) , [additional_special_token] ) self.assertListEqual(getattr(snake_case_ , "additional_special_tokens_ids" ) , [additional_special_token_id] ) def lowercase ( self : Any ): pass def lowercase ( self : List[Any] ): pass def lowercase ( self : Union[str, Any] ): pass def lowercase ( self : List[Any] ): pass def lowercase ( self : List[Any] ): pass def lowercase ( self : int ): pass def lowercase ( self : int ): pass def lowercase ( self : Optional[Any] ): pass
22
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowercase ( _SCREAMING_SNAKE_CASE : Features ): '''simple docstring''' _UpperCAmelCase = np.inf def set_batch_size(_SCREAMING_SNAKE_CASE : FeatureType ) -> None: nonlocal batch_size if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and feature.dtype == "binary": _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return None if batch_size is np.inf else batch_size class _a ( lowerCAmelCase): """simple docstring""" def __init__( self : Optional[Any] , __UpperCamelCase : NestedDataStructureLike[PathLike] , __UpperCamelCase : Optional[NamedSplit] = None , __UpperCamelCase : Optional[Features] = None , __UpperCamelCase : str = None , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[int] = None , **__UpperCamelCase : int , )->Union[str, Any]: super().__init__( __UpperCamelCase , split=__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase , streaming=__UpperCamelCase , num_proc=__UpperCamelCase , **__UpperCamelCase , ) _UpperCAmelCase = path_or_paths if isinstance(__UpperCamelCase , __UpperCamelCase ) else {self.split: path_or_paths} _UpperCAmelCase = _PACKAGED_DATASETS_MODULES['''parquet'''][1] _UpperCAmelCase = Parquet( cache_dir=__UpperCamelCase , data_files=__UpperCamelCase , features=__UpperCamelCase , hash=__UpperCamelCase , **__UpperCamelCase , ) def lowercase__ ( self : Union[str, Any] )->Dict: # Build iterable dataset if self.streaming: _UpperCAmelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None self.builder.download_and_prepare( download_config=__UpperCamelCase , download_mode=__UpperCamelCase , verification_mode=__UpperCamelCase , base_path=__UpperCamelCase , num_proc=self.num_proc , ) _UpperCAmelCase = self.builder.as_dataset( split=self.split , verification_mode=__UpperCamelCase , in_memory=self.keep_in_memory ) return dataset class _a : """simple docstring""" def __init__( self : Optional[int] , __UpperCamelCase : Dataset , __UpperCamelCase : Union[PathLike, BinaryIO] , __UpperCamelCase : Optional[int] = None , **__UpperCamelCase : Tuple , )->Optional[int]: _UpperCAmelCase = dataset _UpperCAmelCase = path_or_buf _UpperCAmelCase = batch_size or get_writer_batch_size(dataset.features ) _UpperCAmelCase = parquet_writer_kwargs def lowercase__ ( self : Optional[int] )->int: _UpperCAmelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , '''wb+''' ) as buffer: _UpperCAmelCase = self._write(file_obj=__UpperCamelCase , batch_size=__UpperCamelCase , **self.parquet_writer_kwargs ) else: _UpperCAmelCase = self._write(file_obj=self.path_or_buf , batch_size=__UpperCamelCase , **self.parquet_writer_kwargs ) return written def lowercase__ ( self : int , __UpperCamelCase : BinaryIO , __UpperCamelCase : int , **__UpperCamelCase : int )->int: _UpperCAmelCase = 0 _UpperCAmelCase = parquet_writer_kwargs.pop('''path_or_buf''' , __UpperCamelCase ) _UpperCAmelCase = self.dataset.features.arrow_schema _UpperCAmelCase = pq.ParquetWriter(__UpperCamelCase , schema=__UpperCamelCase , **__UpperCamelCase ) for offset in logging.tqdm( range(0 , len(self.dataset ) , __UpperCamelCase ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ): _UpperCAmelCase = query_table( table=self.dataset._data , key=slice(__UpperCamelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(__UpperCamelCase ) written += batch.nbytes writer.close() return written
260
0
'''simple docstring''' from collections.abc import Callable import numpy as np def snake_case_ ( _lowerCAmelCase : Callable , _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float ) -> np.ndarray: UpperCAmelCase : Union[str, Any] = int(np.ceil((x_end - xa) / step_size ) ) UpperCAmelCase : Tuple = np.zeros((n + 1,) ) UpperCAmelCase : Optional[int] = ya UpperCAmelCase : Optional[Any] = xa for k in range(_lowerCAmelCase ): UpperCAmelCase : Dict = y[k] + step_size * ode_func(_lowerCAmelCase , y[k] ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
23
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str = " " ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = 0 for index, char in enumerate(_SCREAMING_SNAKE_CASE ): if char == separator: split_words.append(string[last_index:index] ) _UpperCAmelCase = index + 1 elif index + 1 == len(_SCREAMING_SNAKE_CASE ): split_words.append(string[last_index : index + 1] ) return split_words if __name__ == "__main__": from doctest import testmod testmod()
260
0
import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase__ ( snake_case_ : List[str] , snake_case_ : List[str] , snake_case_ : Tuple=None ) -> Optional[int]: # set parameter of one layer assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match""" __snake_case = nn.Parameter(snake_case_ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match""" __snake_case = nn.Parameter(snake_case_ ) def lowerCamelCase__ ( snake_case_ : Union[str, Any] , snake_case_ : Dict , snake_case_ : Dict ) -> Dict: # set torch weights for 1-to-1 comparison __snake_case = np.asarray(weights[0] ) __snake_case = np.asarray(weights[1] ) __snake_case = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(snake_case_ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case_ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(snake_case_ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case_ ) , ) set_param( torch_layer.output.dense , torch.tensor(snake_case_ ).view(-1 , snake_case_ ).contiguous().transpose(0 , 1 ) , ) def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : Dict , snake_case_ : Dict ) -> Union[str, Any]: # set torch weights for 1-to-1 comparison __snake_case = np.asarray(weights[0] ) __snake_case = np.asarray(weights[1] ) __snake_case = np.asarray(weights[2] ) __snake_case = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(snake_case_ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case_ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(snake_case_ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case_ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(snake_case_ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case_ ) , ) set_param( torch_layer.output.dense , torch.tensor(snake_case_ ).view(-1 , snake_case_ ).contiguous().transpose(0 , 1 ) , ) def lowerCamelCase__ ( snake_case_ : Union[str, Any] , snake_case_ : int , snake_case_ : Optional[Any] ) -> List[Any]: # layernorm 1 __snake_case = weights[0][0][0] __snake_case = np.asarray(layer_norm_a[0] ) __snake_case = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(snake_case_ ) , torch.tensor(snake_case_ ) , ) # lsh weights + output __snake_case = weights[0][1] if len(snake_case_ ) < 4: set_layer_weights_in_torch_lsh(snake_case_ , torch_block.attention , snake_case_ ) else: set_layer_weights_in_torch_local(snake_case_ , torch_block.attention , snake_case_ ) # intermediate weighs __snake_case = weights[2][0][1][2] # Chunked Feed Forward if len(snake_case_ ) == 4: __snake_case = intermediate_weights[2] # layernorm 2 __snake_case = np.asarray(intermediate_weights[0][0] ) __snake_case = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(snake_case_ ) , torch.tensor(snake_case_ ) , ) # intermediate dense __snake_case = np.asarray(intermediate_weights[1][0] ) __snake_case = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(snake_case_ ).transpose(0 , 1 ).contiguous() , torch.tensor(snake_case_ ) , ) # intermediate out __snake_case = np.asarray(intermediate_weights[4][0] ) __snake_case = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(snake_case_ ).transpose(0 , 1 ).contiguous() , torch.tensor(snake_case_ ) , ) def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any] ) -> str: # reformer model __snake_case = torch_model.reformer # word embeds __snake_case = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(snake_case_ ) , ) if isinstance(weights[3] , snake_case_ ): __snake_case = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): __snake_case = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), f"""{position_embeddings[emb_idx]} emb does not match""" __snake_case = nn.Parameter(torch.tensor(snake_case_ ) ) __snake_case = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( snake_case_ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): __snake_case = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(snake_case_ , snake_case_ , snake_case_ ) # output layer norm __snake_case = np.asarray(weights[7][0] ) __snake_case = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(snake_case_ ) , torch.tensor(snake_case_ ) , ) # output embeddings __snake_case = np.asarray(weights[9][0] ) __snake_case = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(snake_case_ ).transpose(0 , 1 ).contiguous() , torch.tensor(snake_case_ ) , ) def lowerCamelCase__ ( snake_case_ : List[str] , snake_case_ : Optional[Any] , snake_case_ : Tuple ) -> Tuple: # Initialise PyTorch model __snake_case = ReformerConfig.from_json_file(snake_case_ ) print(f"""Building PyTorch model from configuration: {config}""" ) __snake_case = ReformerModelWithLMHead(snake_case_ ) with open(snake_case_ , '''rb''' ) as f: __snake_case = pickle.load(snake_case_ )['''weights'''] set_model_weights_in_torch(snake_case_ , snake_case_ , config.hidden_size ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , snake_case_ ) if __name__ == "__main__": snake_case_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained Reformer model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) snake_case_ = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
24
"""simple docstring""" import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def lowercase ( _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' _UpperCAmelCase = args.pruning_method _UpperCAmelCase = args.threshold _UpperCAmelCase = args.model_name_or_path.rstrip('''/''' ) _UpperCAmelCase = args.target_model_path print(f'Load fine-pruned model from {model_name_or_path}' ) _UpperCAmelCase = torch.load(os.path.join(_SCREAMING_SNAKE_CASE , '''pytorch_model.bin''' ) ) _UpperCAmelCase = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) elif "classifier" in name or "qa_output" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) elif "bias" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) else: if pruning_method == "magnitude": _UpperCAmelCase = MagnitudeBinarizer.apply(inputs=_SCREAMING_SNAKE_CASE , threshold=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "topK": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase = TopKBinarizer.apply(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase = ThresholdBinarizer.apply(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "l0": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase , _UpperCAmelCase = -0.1, 1.1 _UpperCAmelCase = torch.sigmoid(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = s * (r - l) + l _UpperCAmelCase = s_bar.clamp(min=0.0 , max=1.0 ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) else: raise ValueError('''Unknown pruning method''' ) if target_model_path is None: _UpperCAmelCase = os.path.join( os.path.dirname(_SCREAMING_SNAKE_CASE ) , f'bertarized_{os.path.basename(_SCREAMING_SNAKE_CASE )}' ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): shutil.copytree(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print(f'\nCreated folder {target_model_path}' ) torch.save(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , '''pytorch_model.bin''' ) ) print('''\nPruned model saved! See you later!''' ) if __name__ == "__main__": __A : Tuple = argparse.ArgumentParser() parser.add_argument( "--pruning_method", choices=["l0", "magnitude", "topK", "sigmoied_threshold"], type=str, required=True, help=( "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning," " sigmoied_threshold = Soft movement pruning)" ), ) parser.add_argument( "--threshold", type=float, required=False, help=( "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model." "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared." "Not needed for `l0`" ), ) parser.add_argument( "--model_name_or_path", type=str, required=True, help="Folder containing the model that was previously fine-pruned", ) parser.add_argument( "--target_model_path", default=None, type=str, required=False, help="Folder containing the model that was previously fine-pruned", ) __A : Optional[int] = parser.parse_args() main(args)
260
0
"""simple docstring""" import requests UpperCAmelCase__ : Any = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey=' def lowercase_ ( _snake_case ): # fetching a list of articles in json format SCREAMING_SNAKE_CASE__ : List[Any] = requests.get(_NEWS_API + bbc_news_api_key ).json() # each article in the list is a dict for i, article in enumerate(bbc_news_page["""articles"""] ,1 ): print(f'''{i}.) {article['title']}''' ) if __name__ == "__main__": fetch_bbc_news(bbc_news_api_key='<Your BBC News API key goes here>')
25
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) while cur > 1: # Find the maximum number in arr _UpperCAmelCase = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi _UpperCAmelCase = arr[mi::-1] + arr[mi + 1 : len(_SCREAMING_SNAKE_CASE )] # Reverse whole list _UpperCAmelCase = arr[cur - 1 :: -1] + arr[cur : len(_SCREAMING_SNAKE_CASE )] cur -= 1 return arr if __name__ == "__main__": __A : List[str] = input("Enter numbers separated by a comma:\n").strip() __A : List[Any] = [int(item) for item in user_input.split(",")] print(pancake_sort(unsorted))
260
0
from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class lowercase ( UpperCamelCase__ ): _a = 42 class lowercase ( nn.Module ): def __init__( self , _a=3 , _a=3 , _a=("DownEncoderBlock2D",) , _a=(64,) , _a=2 , _a=32 , _a="silu" , _a=True , ) -> Any: super().__init__() _A : List[str] = layers_per_block _A : Optional[Any] = torch.nn.Convad( _a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) _A : str = None _A : int = nn.ModuleList([] ) # down _A : Tuple = block_out_channels[0] for i, down_block_type in enumerate(_a ): _A : int = output_channel _A : List[str] = block_out_channels[i] _A : Optional[Any] = i == len(_a ) - 1 _A : Tuple = get_down_block( _a , num_layers=self.layers_per_block , in_channels=_a , out_channels=_a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=_a , resnet_groups=_a , attention_head_dim=_a , temb_channels=_a , ) self.down_blocks.append(_a ) # mid _A : List[Any] = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_a , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=_a , temb_channels=_a , ) # out _A : Any = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=_a , eps=1e-6 ) _A : Optional[Any] = nn.SiLU() _A : List[str] = 2 * out_channels if double_z else out_channels _A : List[Any] = nn.Convad(block_out_channels[-1] , _a , 3 , padding=1 ) _A : Any = False def a__ ( self , _a ) -> List[str]: _A : Any = x _A : Optional[Any] = self.conv_in(_a ) if self.training and self.gradient_checkpointing: def create_custom_forward(_a ): def custom_forward(*_a ): return module(*_a ) return custom_forward # down if is_torch_version(""">=""" , """1.11.0""" ): for down_block in self.down_blocks: _A : int = torch.utils.checkpoint.checkpoint( create_custom_forward(_a ) , _a , use_reentrant=_a ) # middle _A : str = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , _a , use_reentrant=_a ) else: for down_block in self.down_blocks: _A : str = torch.utils.checkpoint.checkpoint(create_custom_forward(_a ) , _a ) # middle _A : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , _a ) else: # down for down_block in self.down_blocks: _A : List[Any] = down_block(_a ) # middle _A : Dict = self.mid_block(_a ) # post-process _A : Union[str, Any] = self.conv_norm_out(_a ) _A : List[str] = self.conv_act(_a ) _A : str = self.conv_out(_a ) return sample class lowercase ( nn.Module ): def __init__( self , _a=3 , _a=3 , _a=("UpDecoderBlock2D",) , _a=(64,) , _a=2 , _a=32 , _a="silu" , _a="group" , ) -> Optional[int]: super().__init__() _A : Tuple = layers_per_block _A : Any = nn.Convad( _a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) _A : Optional[Any] = None _A : str = nn.ModuleList([] ) _A : Optional[int] = in_channels if norm_type == """spatial""" else None # mid _A : List[str] = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_a , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=_a , temb_channels=_a , ) # up _A : List[str] = list(reversed(_a ) ) _A : List[Any] = reversed_block_out_channels[0] for i, up_block_type in enumerate(_a ): _A : Any = output_channel _A : List[str] = reversed_block_out_channels[i] _A : Optional[Any] = i == len(_a ) - 1 _A : List[str] = get_up_block( _a , num_layers=self.layers_per_block + 1 , in_channels=_a , out_channels=_a , prev_output_channel=_a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=_a , resnet_groups=_a , attention_head_dim=_a , temb_channels=_a , resnet_time_scale_shift=_a , ) self.up_blocks.append(_a ) _A : Optional[int] = output_channel # out if norm_type == "spatial": _A : Dict = SpatialNorm(block_out_channels[0] , _a ) else: _A : List[str] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=_a , eps=1e-6 ) _A : List[str] = nn.SiLU() _A : str = nn.Convad(block_out_channels[0] , _a , 3 , padding=1 ) _A : int = False def a__ ( self , _a , _a=None ) -> str: _A : Union[str, Any] = z _A : Dict = self.conv_in(_a ) _A : str = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(_a ): def custom_forward(*_a ): return module(*_a ) return custom_forward if is_torch_version(""">=""" , """1.11.0""" ): # middle _A : str = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , _a , _a , use_reentrant=_a ) _A : Dict = sample.to(_a ) # up for up_block in self.up_blocks: _A : Tuple = torch.utils.checkpoint.checkpoint( create_custom_forward(_a ) , _a , _a , use_reentrant=_a ) else: # middle _A : Optional[int] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , _a , _a ) _A : Optional[int] = sample.to(_a ) # up for up_block in self.up_blocks: _A : Tuple = torch.utils.checkpoint.checkpoint(create_custom_forward(_a ) , _a , _a ) else: # middle _A : Tuple = self.mid_block(_a , _a ) _A : Dict = sample.to(_a ) # up for up_block in self.up_blocks: _A : Dict = up_block(_a , _a ) # post-process if latent_embeds is None: _A : List[Any] = self.conv_norm_out(_a ) else: _A : int = self.conv_norm_out(_a , _a ) _A : Tuple = self.conv_act(_a ) _A : Tuple = self.conv_out(_a ) return sample class lowercase ( nn.Module ): def __init__( self , _a , _a , _a , _a=None , _a="random" , _a=False , _a=True ) -> Union[str, Any]: super().__init__() _A : List[str] = n_e _A : List[str] = vq_embed_dim _A : int = beta _A : Optional[Any] = legacy _A : int = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) _A : Tuple = remap if self.remap is not None: self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) ) _A : Optional[Any] = self.used.shape[0] _A : Dict = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": _A : List[str] = self.re_embed _A : str = self.re_embed + 1 print( F'''Remapping {self.n_e} indices to {self.re_embed} indices. ''' F'''Using {self.unknown_index} for unknown indices.''' ) else: _A : Optional[Any] = n_e _A : str = sane_index_shape def a__ ( self , _a ) -> List[str]: _A : Union[str, Any] = inds.shape assert len(_a ) > 1 _A : Dict = inds.reshape(ishape[0] , -1 ) _A : Any = self.used.to(_a ) _A : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long() _A : Optional[int] = match.argmax(-1 ) _A : Dict = match.sum(2 ) < 1 if self.unknown_index == "random": _A : Dict = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: _A : Tuple = self.unknown_index return new.reshape(_a ) def a__ ( self , _a ) -> Optional[int]: _A : Dict = inds.shape assert len(_a ) > 1 _A : Union[str, Any] = inds.reshape(ishape[0] , -1 ) _A : List[Any] = self.used.to(_a ) if self.re_embed > self.used.shape[0]: # extra token _A : Union[str, Any] = 0 # simply set to zero _A : int = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , _a ) return back.reshape(_a ) def a__ ( self , _a ) -> str: # reshape z -> (batch, height, width, channel) and flatten _A : Dict = z.permute(0 , 2 , 3 , 1 ).contiguous() _A : List[str] = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z _A : Tuple = torch.argmin(torch.cdist(_a , self.embedding.weight ) , dim=1 ) _A : List[Any] = self.embedding(_a ).view(z.shape ) _A : Optional[int] = None _A : int = None # compute loss for embedding if not self.legacy: _A : Any = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: _A : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients _A : int = z + (z_q - z).detach() # reshape back to match original input shape _A : Dict = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: _A : Dict = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis _A : List[Any] = self.remap_to_used(_a ) _A : str = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: _A : List[Any] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def a__ ( self , _a , _a ) -> Dict: # shape specifying (batch, height, width, channel) if self.remap is not None: _A : Optional[int] = indices.reshape(shape[0] , -1 ) # add batch axis _A : str = self.unmap_to_all(_a ) _A : Optional[Any] = indices.reshape(-1 ) # flatten again # get quantized latent vectors _A : Optional[int] = self.embedding(_a ) if shape is not None: _A : Tuple = z_q.view(_a ) # reshape back to match original input shape _A : str = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a=False ) -> List[str]: _A : str = parameters _A , _A : Dict = torch.chunk(_a , 2 , dim=1 ) _A : List[Any] = torch.clamp(self.logvar , -30.0 , 20.0 ) _A : Optional[int] = deterministic _A : List[str] = torch.exp(0.5 * self.logvar ) _A : Dict = torch.exp(self.logvar ) if self.deterministic: _A : str = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def a__ ( self , _a = None ) -> torch.FloatTensor: # make sure sample is on the same device as the parameters and has same dtype _A : Optional[int] = randn_tensor( self.mean.shape , generator=_a , device=self.parameters.device , dtype=self.parameters.dtype ) _A : Optional[Any] = self.mean + self.std * sample return x def a__ ( self , _a=None ) -> int: if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def a__ ( self , _a , _a=[1, 2, 3] ) -> Dict: if self.deterministic: return torch.Tensor([0.0] ) _A : Dict = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=_a ) def a__ ( self ) -> int: return self.mean
26
"""simple docstring""" from pathlib import Path import numpy as np from PIL import Image def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2989 * r + 0.5870 * g + 0.1140 * b def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' return (gray > 127) & (gray <= 255) def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' _UpperCAmelCase = np.zeros_like(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image _UpperCAmelCase = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): _UpperCAmelCase = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() _UpperCAmelCase = int(summation > 0 ) return output if __name__ == "__main__": # read original image __A : str = Path(__file__).resolve().parent / "image_data" / "lena.jpg" __A : str = np.array(Image.open(lena_path)) # kernel to be applied __A : List[Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) __A : Optional[Any] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image __A : Optional[Any] = Image.fromarray(output).convert("RGB") pil_img.save("result_dilation.png")
260
0
'''simple docstring''' from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__a , 'embed_dim' ) ) self.parent.assertTrue(hasattr(__a , 'num_heads' ) ) class __UpperCamelCase : def __init__( self , __a , __a=13 , __a=64 , __a=3 , __a=[16, 48, 96] , __a=[1, 3, 6] , __a=[1, 2, 10] , __a=[7, 3, 3] , __a=[4, 2, 2] , __a=[2, 1, 1] , __a=[2, 2, 2] , __a=[False, False, True] , __a=[0.0, 0.0, 0.0] , __a=0.02 , __a=1E-1_2 , __a=True , __a=True , __a=2 , ): '''simple docstring''' __a : str = parent __a : List[Any] = batch_size __a : Optional[int] = image_size __a : List[str] = patch_sizes __a : str = patch_stride __a : Any = patch_padding __a : Dict = is_training __a : Union[str, Any] = use_labels __a : Dict = num_labels __a : List[Any] = num_channels __a : Any = embed_dim __a : int = num_heads __a : Optional[int] = stride_kv __a : Dict = depth __a : List[str] = cls_token __a : List[Any] = attention_drop_rate __a : Tuple = initializer_range __a : int = layer_norm_eps def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a : Dict = None if self.use_labels: # create a random int32 tensor of given shape __a : str = ids_tensor([self.batch_size] , self.num_labels ) __a : str = self.get_config() return config, pixel_values, labels def __UpperCAmelCase ( self ): '''simple docstring''' return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def __UpperCAmelCase ( self , __a , __a , __a ): '''simple docstring''' __a : Optional[int] = TFCvtModel(config=__a ) __a : Dict = model(__a , training=__a ) __a : Any = (self.image_size, self.image_size) __a , __a : Dict = image_size[0], image_size[1] for i in range(len(self.depth ) ): __a : Tuple = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) __a : str = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def __UpperCAmelCase ( self , __a , __a , __a ): '''simple docstring''' __a : List[Any] = self.num_labels __a : Optional[int] = TFCvtForImageClassification(__a ) __a : Dict = model(__a , labels=__a , training=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.prepare_config_and_inputs() __a , __a , __a : Tuple = config_and_inputs __a : str = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () A_ = ( {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification} if is_tf_available() else {} ) A_ = False A_ = False A_ = False A_ = False A_ = False def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = TFCvtModelTester(self ) __a : List[Any] = TFCvtConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def __UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason='Cvt does not output attentions' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip(reason='Cvt does not use inputs_embeds' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip(reason='Cvt does not support input and output embeddings' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) def __UpperCAmelCase ( self ): '''simple docstring''' super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' super().test_keras_fit() @unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8' ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = tf.keras.mixed_precision.Policy('mixed_float16' ) tf.keras.mixed_precision.set_global_policy(__a ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy('float32' ) def __UpperCAmelCase ( self ): '''simple docstring''' __a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : Dict = model_class(__a ) __a : Optional[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a : Optional[Any] = [*signature.parameters.keys()] __a : Optional[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' def check_hidden_states_output(__a , __a , __a ): __a : List[str] = model_class(__a ) __a : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) ) __a : Any = outputs.hidden_states __a : Union[str, Any] = len(self.model_tester.depth ) self.assertEqual(len(__a ) , __a ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) __a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : List[str] = True check_hidden_states_output(__a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __a : Optional[Any] = True check_hidden_states_output(__a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : Optional[Any] = TFCvtModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowerCamelCase (): __a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class __UpperCamelCase ( unittest.TestCase ): @cached_property def __UpperCAmelCase ( self ): '''simple docstring''' return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __a : Tuple = self.default_image_processor __a : Any = prepare_img() __a : int = image_processor(images=__a , return_tensors='tf' ) # forward pass __a : Any = model(**__a ) # verify the logits __a : Any = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , __a ) __a : Optional[Any] = tf.constant([0.9285, 0.9015, -0.3150] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __a , atol=1E-4 ) )
27
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A : Tuple = logging.get_logger(__name__) __A : Optional[Any] = { "MIT/ast-finetuned-audioset-10-10-0.4593": ( "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json" ), } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """audio-spectrogram-transformer""" def __init__( self : int , __UpperCamelCase : Optional[Any]=7_6_8 , __UpperCamelCase : int=1_2 , __UpperCamelCase : List[Any]=1_2 , __UpperCamelCase : List[Any]=3_0_7_2 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : Union[str, Any]=0.0 , __UpperCamelCase : Dict=0.0 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Union[str, Any]=1e-12 , __UpperCamelCase : Optional[Any]=1_6 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : int=1_0 , __UpperCamelCase : Optional[int]=1_0 , __UpperCamelCase : str=1_0_2_4 , __UpperCamelCase : Optional[Any]=1_2_8 , **__UpperCamelCase : Any , )->Tuple: super().__init__(**__UpperCamelCase ) _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = patch_size _UpperCAmelCase = qkv_bias _UpperCAmelCase = frequency_stride _UpperCAmelCase = time_stride _UpperCAmelCase = max_length _UpperCAmelCase = num_mel_bins
260
0
'''simple docstring''' import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, BertLayer, BertPooler, BertPreTrainedModel, ) def __lowerCamelCase ( A__ ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = torch.exp(A__ ) UpperCamelCase = torch.sum(A__ , dim=1 ) # sum of exp(x_i) UpperCamelCase = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i) return torch.log(A__ ) - B / A class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" def __init__( self : Any , UpperCamelCase__ : Any ): """simple docstring""" super().__init__() UpperCamelCase = config.output_attentions UpperCamelCase = config.output_hidden_states UpperCamelCase = nn.ModuleList([BertLayer(UpperCamelCase__ ) for _ in range(config.num_hidden_layers )] ) UpperCamelCase = nn.ModuleList([BertHighway(UpperCamelCase__ ) for _ in range(config.num_hidden_layers )] ) UpperCamelCase = [-1 for _ in range(config.num_hidden_layers )] def A ( self : Dict , UpperCamelCase__ : str ): """simple docstring""" if (type(UpperCamelCase__ ) is float) or (type(UpperCamelCase__ ) is int): for i in range(len(self.early_exit_entropy ) ): UpperCamelCase = x else: UpperCamelCase = x def A ( self : Union[str, Any] , UpperCamelCase__ : str ): """simple docstring""" UpperCamelCase = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name] ) def A ( self : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : List[Any]=None , ): """simple docstring""" UpperCamelCase = () UpperCamelCase = () UpperCamelCase = () for i, layer_module in enumerate(self.layer ): if self.output_hidden_states: UpperCamelCase = all_hidden_states + (hidden_states,) UpperCamelCase = layer_module( UpperCamelCase__ , UpperCamelCase__ , head_mask[i] , UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase = layer_outputs[0] if self.output_attentions: UpperCamelCase = all_attentions + (layer_outputs[1],) UpperCamelCase = (hidden_states,) if self.output_hidden_states: UpperCamelCase = current_outputs + (all_hidden_states,) if self.output_attentions: UpperCamelCase = current_outputs + (all_attentions,) UpperCamelCase = self.highway[i](UpperCamelCase__ ) # logits, pooled_output if not self.training: UpperCamelCase = highway_exit[0] UpperCamelCase = entropy(UpperCamelCase__ ) UpperCamelCase = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy UpperCamelCase = all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: UpperCamelCase = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(UpperCamelCase__ , i + 1 ) else: UpperCamelCase = all_highway_exits + (highway_exit,) # Add last layer if self.output_hidden_states: UpperCamelCase = all_hidden_states + (hidden_states,) UpperCamelCase = (hidden_states,) if self.output_hidden_states: UpperCamelCase = outputs + (all_hidden_states,) if self.output_attentions: UpperCamelCase = outputs + (all_attentions,) UpperCamelCase = outputs + (all_highway_exits,) return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits @add_start_docstrings( """The Bert Model transformer with early exiting (DeeBERT). """ , _a , ) class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" def __init__( self : Any , UpperCamelCase__ : Any ): """simple docstring""" super().__init__(UpperCamelCase__ ) UpperCamelCase = config UpperCamelCase = BertEmbeddings(UpperCamelCase__ ) UpperCamelCase = DeeBertEncoder(UpperCamelCase__ ) UpperCamelCase = BertPooler(UpperCamelCase__ ) self.init_weights() def A ( self : int ): """simple docstring""" self.encoder.init_highway_pooler(self.pooler ) def A ( self : List[Any] ): """simple docstring""" return self.embeddings.word_embeddings def A ( self : List[str] , UpperCamelCase__ : Optional[Any] ): """simple docstring""" UpperCamelCase = value def A ( self : Any , UpperCamelCase__ : List[str] ): """simple docstring""" for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(UpperCamelCase__ ) @add_start_docstrings_to_model_forward(UpperCamelCase__ ) def A ( self : Optional[Any] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : int=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Optional[int]=None , ): """simple docstring""" if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' ) elif input_ids is not None: UpperCamelCase = input_ids.size() elif inputs_embeds is not None: UpperCamelCase = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds' ) UpperCamelCase = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: UpperCamelCase = torch.ones(UpperCamelCase__ , device=UpperCamelCase__ ) if encoder_attention_mask is None: UpperCamelCase = torch.ones(UpperCamelCase__ , device=UpperCamelCase__ ) if token_type_ids is None: UpperCamelCase = torch.zeros(UpperCamelCase__ , dtype=torch.long , device=UpperCamelCase__ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. UpperCamelCase = self.get_extended_attention_mask(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_attention_mask.dim() == 3: UpperCamelCase = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: UpperCamelCase = encoder_attention_mask[:, None, None, :] UpperCamelCase = encoder_extended_attention_mask.to( dtype=next(self.parameters() ).dtype ) # fp16 compatibility UpperCamelCase = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] UpperCamelCase = self.get_head_mask(UpperCamelCase__ , self.config.num_hidden_layers ) UpperCamelCase = self.embeddings( input_ids=UpperCamelCase__ , position_ids=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , inputs_embeds=UpperCamelCase__ ) UpperCamelCase = self.encoder( UpperCamelCase__ , attention_mask=UpperCamelCase__ , head_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , ) UpperCamelCase = encoder_outputs[0] UpperCamelCase = self.pooler(UpperCamelCase__ ) UpperCamelCase = ( sequence_output, pooled_output, ) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" def __init__( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] ): """simple docstring""" UpperCamelCase = message UpperCamelCase = exit_layer # start from 1! class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] , UpperCamelCase__ : Union[str, Any] ): """simple docstring""" super().__init__() UpperCamelCase = BertPooler(UpperCamelCase__ ) UpperCamelCase = nn.Dropout(config.hidden_dropout_prob ) UpperCamelCase = nn.Linear(config.hidden_size , config.num_labels ) def A ( self : Dict , UpperCamelCase__ : Tuple ): """simple docstring""" UpperCamelCase = encoder_outputs[0] UpperCamelCase = self.pooler(UpperCamelCase__ ) # "return" pooler_output # BertModel UpperCamelCase = (pooler_input, pooler_output) + encoder_outputs[1:] # "return" bmodel_output # Dropout and classification UpperCamelCase = bmodel_output[1] UpperCamelCase = self.dropout(UpperCamelCase__ ) UpperCamelCase = self.classifier(UpperCamelCase__ ) return logits, pooled_output @add_start_docstrings( """Bert Model (with early exiting - DeeBERT) with a classifier on top, also takes care of multi-layer training. """ , _a , ) class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" def __init__( self : Optional[int] , UpperCamelCase__ : Optional[Any] ): """simple docstring""" super().__init__(UpperCamelCase__ ) UpperCamelCase = config.num_labels UpperCamelCase = config.num_hidden_layers UpperCamelCase = DeeBertModel(UpperCamelCase__ ) UpperCamelCase = nn.Dropout(config.hidden_dropout_prob ) UpperCamelCase = nn.Linear(config.hidden_size , self.config.num_labels ) self.init_weights() @add_start_docstrings_to_model_forward(UpperCamelCase__ ) def A ( self : str , UpperCamelCase__ : int=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : str=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : str=-1 , UpperCamelCase__ : Union[str, Any]=False , ): """simple docstring""" UpperCamelCase = self.num_layers try: UpperCamelCase = self.bert( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , position_ids=UpperCamelCase__ , head_mask=UpperCamelCase__ , inputs_embeds=UpperCamelCase__ , ) # sequence_output, pooled_output, (hidden_states), (attentions), highway exits UpperCamelCase = outputs[1] UpperCamelCase = self.dropout(UpperCamelCase__ ) UpperCamelCase = self.classifier(UpperCamelCase__ ) UpperCamelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: UpperCamelCase = e.message UpperCamelCase = e.exit_layer UpperCamelCase = outputs[0] if not self.training: UpperCamelCase = entropy(UpperCamelCase__ ) UpperCamelCase = [] UpperCamelCase = [] if labels is not None: if self.num_labels == 1: # We are doing regression UpperCamelCase = MSELoss() UpperCamelCase = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: UpperCamelCase = CrossEntropyLoss() UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits UpperCamelCase = [] for highway_exit in outputs[-1]: UpperCamelCase = highway_exit[0] if not self.training: highway_logits_all.append(UpperCamelCase__ ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression UpperCamelCase = MSELoss() UpperCamelCase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: UpperCamelCase = CrossEntropyLoss() UpperCamelCase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(UpperCamelCase__ ) if train_highway: UpperCamelCase = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: UpperCamelCase = (loss,) + outputs if not self.training: UpperCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: UpperCamelCase = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
28
"""simple docstring""" def lowercase ( ): '''simple docstring''' _UpperCAmelCase = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] _UpperCAmelCase = 6 _UpperCAmelCase = 1 _UpperCAmelCase = 1901 _UpperCAmelCase = 0 while year < 2001: day += 7 if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0): if day > days_per_month[month - 1] and month != 2: month += 1 _UpperCAmelCase = day - days_per_month[month - 2] elif day > 29 and month == 2: month += 1 _UpperCAmelCase = day - 29 else: if day > days_per_month[month - 1]: month += 1 _UpperCAmelCase = day - days_per_month[month - 2] if month > 12: year += 1 _UpperCAmelCase = 1 if year < 2001 and day == 1: sundays += 1 return sundays if __name__ == "__main__": print(solution())
260
0
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. def lowercase__ ( __snake_case : List[Any] ): '''simple docstring''' UpperCAmelCase_ : Any = [False] * len(__snake_case ) UpperCAmelCase_ : Dict = [-1] * len(__snake_case ) def dfs(__snake_case : Dict , __snake_case : Tuple ): UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : Dict = c for u in graph[v]: if not visited[u]: dfs(__snake_case , 1 - c ) for i in range(len(__snake_case ) ): if not visited[i]: dfs(__snake_case , 0 ) for i in range(len(__snake_case ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph __UpperCAmelCase = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
29
"""simple docstring""" from __future__ import annotations import math def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = str(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [n] for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ): list_nums.append(int(str_num[i:] ) ) list_nums.append(int(str_num[:-i] ) ) return list_nums def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if len(str(_SCREAMING_SNAKE_CASE ) ) > 3: if not is_prime(int(str(_SCREAMING_SNAKE_CASE )[-3:] ) ) or not is_prime(int(str(_SCREAMING_SNAKE_CASE )[:3] ) ): return False return True def lowercase ( _SCREAMING_SNAKE_CASE : int = 11 ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = 13 while len(_SCREAMING_SNAKE_CASE ) != count: if validate(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = list_truncated_nums(_SCREAMING_SNAKE_CASE ) if all(is_prime(_SCREAMING_SNAKE_CASE ) for i in list_nums ): list_truncated_primes.append(_SCREAMING_SNAKE_CASE ) num += 2 return list_truncated_primes def lowercase ( ): '''simple docstring''' return sum(compute_truncated_primes(11 ) ) if __name__ == "__main__": print(f'''{sum(compute_truncated_primes(11)) = }''')
260
0
import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class lowercase__: """simple docstring""" def _lowercase ( self : int ) -> Dict: torch.manual_seed(0 ) lowercase_ = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) lowercase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) lowercase_ = UNetaDConditionModel( sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowercase_ = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=SCREAMING_SNAKE_CASE_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , ) torch.manual_seed(0 ) lowercase_ = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def _lowercase ( self : str ) -> int: torch.manual_seed(0 ) lowercase_ = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) lowercase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) lowercase_ = UNetaDConditionModel( sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='''gelu''' , time_embedding_dim=3_2 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowercase_ = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=SCREAMING_SNAKE_CASE_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , ) torch.manual_seed(0 ) lowercase_ = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , ) torch.manual_seed(0 ) lowercase_ = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def _lowercase ( self : str ) -> Tuple: lowercase_ = self.get_dummy_components() lowercase_ = self.pipeline_class(**SCREAMING_SNAKE_CASE_ ) pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) lowercase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) lowercase_ = inputs['''prompt'''] lowercase_ = inputs['''generator'''] lowercase_ = inputs['''num_inference_steps'''] lowercase_ = inputs['''output_type'''] if "image" in inputs: lowercase_ = inputs['''image'''] else: lowercase_ = None if "mask_image" in inputs: lowercase_ = inputs['''mask_image'''] else: lowercase_ = None if "original_image" in inputs: lowercase_ = inputs['''original_image'''] else: lowercase_ = None lowercase_ , lowercase_ = pipe.encode_prompt(SCREAMING_SNAKE_CASE_ ) # inputs with prompt converted to embeddings lowercase_ = { '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''': num_inference_steps, '''output_type''': output_type, } if image is not None: lowercase_ = image if mask_image is not None: lowercase_ = mask_image if original_image is not None: lowercase_ = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = pipe(**SCREAMING_SNAKE_CASE_ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.pipeline_class.from_pretrained(SCREAMING_SNAKE_CASE_ ) pipe_loaded.to(SCREAMING_SNAKE_CASE_ ) pipe_loaded.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , ) lowercase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) lowercase_ = inputs['''generator'''] lowercase_ = inputs['''num_inference_steps'''] lowercase_ = inputs['''output_type'''] # inputs with prompt converted to embeddings lowercase_ = { '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''': num_inference_steps, '''output_type''': output_type, } if image is not None: lowercase_ = image if mask_image is not None: lowercase_ = mask_image if original_image is not None: lowercase_ = original_image lowercase_ = pipe_loaded(**SCREAMING_SNAKE_CASE_ )[0] lowercase_ = np.abs(to_np(SCREAMING_SNAKE_CASE_ ) - to_np(SCREAMING_SNAKE_CASE_ ) ).max() self.assertLess(SCREAMING_SNAKE_CASE_ , 1e-4 ) def _lowercase ( self : Tuple ) -> Any: lowercase_ = self.get_dummy_components() lowercase_ = self.pipeline_class(**SCREAMING_SNAKE_CASE_ ) pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) lowercase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) lowercase_ = pipe(**SCREAMING_SNAKE_CASE_ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.pipeline_class.from_pretrained(SCREAMING_SNAKE_CASE_ ) pipe_loaded.to(SCREAMING_SNAKE_CASE_ ) pipe_loaded.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests lowercase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) lowercase_ = pipe_loaded(**SCREAMING_SNAKE_CASE_ )[0] lowercase_ = np.abs(to_np(SCREAMING_SNAKE_CASE_ ) - to_np(SCREAMING_SNAKE_CASE_ ) ).max() self.assertLess(SCREAMING_SNAKE_CASE_ , 1e-4 )
30
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __A : str = sys.version_info >= (3, 10) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Tuple=None ): '''simple docstring''' return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = field(default="""toto""" , metadata={"""help""": """help message"""}) @dataclass class _a : """simple docstring""" UpperCamelCase__ = False UpperCamelCase__ = True UpperCamelCase__ = None class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """titi""" UpperCamelCase__ = """toto""" class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """titi""" UpperCamelCase__ = """toto""" UpperCamelCase__ = 42 @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" def lowercase__ ( self : Tuple )->Optional[int]: _UpperCAmelCase = BasicEnum(self.foo ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" def lowercase__ ( self : List[str] )->List[Any]: _UpperCAmelCase = MixedTypeEnum(self.foo ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = None UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """help message"""}) UpperCamelCase__ = None UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[]) @dataclass class _a : """simple docstring""" UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[1, 2, 3]) UpperCamelCase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) UpperCamelCase__ = list_field(default=[0.1, 0.2, 0.3]) @dataclass class _a : """simple docstring""" UpperCamelCase__ = field() UpperCamelCase__ = field() UpperCamelCase__ = field() def lowercase__ ( self : int )->str: _UpperCAmelCase = BasicEnum(self.required_enum ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = field() UpperCamelCase__ = None UpperCamelCase__ = field(default="""toto""" , metadata={"""help""": """help message"""}) UpperCamelCase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) if is_python_no_less_than_3_10: @dataclass class _a : """simple docstring""" UpperCamelCase__ = False UpperCamelCase__ = True UpperCamelCase__ = None @dataclass class _a : """simple docstring""" UpperCamelCase__ = None UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """help message"""}) UpperCamelCase__ = None UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[]) class _a ( unittest.TestCase): """simple docstring""" def lowercase__ ( self : int , __UpperCamelCase : argparse.ArgumentParser , __UpperCamelCase : argparse.ArgumentParser )->Dict: self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): _UpperCAmelCase = {k: v for k, v in vars(__UpperCamelCase ).items() if k != '''container'''} _UpperCAmelCase = {k: v for k, v in vars(__UpperCamelCase ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , __UpperCamelCase ) and yy.get('''choices''' , __UpperCamelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](__UpperCamelCase ) , yy['''type'''](__UpperCamelCase ) ) del xx["type"], yy["type"] self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : int )->str: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--bar''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--baz''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--flag''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((_UpperCAmelCase) , ) = parser.parse_args_into_dataclasses(__UpperCamelCase , look_for_args_file=__UpperCamelCase ) self.assertFalse(example.flag ) def lowercase__ ( self : Dict )->List[Any]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=4_2 , type=__UpperCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCamelCase , help='''help message''' ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Tuple )->List[str]: _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) expected.add_argument('''--baz''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=__UpperCamelCase , dest='''baz''' ) expected.add_argument('''--opt''' , type=__UpperCamelCase , default=__UpperCamelCase ) _UpperCAmelCase = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(__UpperCamelCase ) for dataclass_type in dataclass_types: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) def lowercase__ ( self : Optional[Any] )->str: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 4_2] , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) _UpperCAmelCase = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) _UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 4_2 ) _UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowercase__ ( self : List[str] )->List[str]: @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 4_2) , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 4_2 ) def lowercase__ ( self : int )->int: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=__UpperCamelCase ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCamelCase ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual( __UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) _UpperCAmelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(__UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def lowercase__ ( self : Union[str, Any] )->Tuple: _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=__UpperCamelCase , type=__UpperCamelCase ) expected.add_argument('''--bar''' , default=__UpperCamelCase , type=__UpperCamelCase , help='''help message''' ) expected.add_argument('''--baz''' , default=__UpperCamelCase , type=__UpperCamelCase ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) _UpperCAmelCase = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(__UpperCamelCase ) for dataclass_type in dataclass_types: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , bar=__UpperCamelCase , baz=__UpperCamelCase , ces=[] , des=[] ) ) _UpperCAmelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(__UpperCamelCase , Namespace(foo=1_2 , bar=3.1_4 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def lowercase__ ( self : Any )->int: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--required_str''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCamelCase , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : str )->List[Any]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCamelCase , ) expected.add_argument('''--opt''' , type=__UpperCamelCase , default=__UpperCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCamelCase , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Optional[Any] )->Optional[int]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } _UpperCAmelCase = parser.parse_dict(__UpperCamelCase )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->List[str]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, '''extra''': 4_2, } self.assertRaises(__UpperCamelCase , parser.parse_dict , __UpperCamelCase , allow_extra_keys=__UpperCamelCase ) def lowercase__ ( self : Optional[Any] )->Optional[int]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(__UpperCamelCase , '''temp_json''' ) os.mkdir(__UpperCamelCase ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->Any: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(__UpperCamelCase , '''temp_yaml''' ) os.mkdir(__UpperCamelCase ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : int )->List[str]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase )
260
0
'''simple docstring''' def UpperCamelCase_ ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int: """simple docstring""" _UpperCAmelCase : List[str] = 1 # To kept the Calculated Value # Since C(n, k) = C(n, n-k) if k > (n - k): _UpperCAmelCase : Any = n - k # Calculate C(n,k) for i in range(_UpperCAmelCase ): result *= n - i result //= i + 1 return result def UpperCamelCase_ ( _UpperCAmelCase : int ) -> int: """simple docstring""" return binomial_coefficient(2 * node_count , _UpperCAmelCase ) // (node_count + 1) def UpperCamelCase_ ( _UpperCAmelCase : int ) -> int: """simple docstring""" if n < 0: raise ValueError("factorial() not defined for negative values" ) _UpperCAmelCase : List[str] = 1 for i in range(1 , n + 1 ): result *= i return result def UpperCamelCase_ ( _UpperCAmelCase : int ) -> int: """simple docstring""" return catalan_number(_UpperCAmelCase ) * factorial(_UpperCAmelCase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Dict = int(input("""Enter the number of nodes: """).strip() or 0) if node_count <= 0: raise ValueError("""We need some nodes to work with.""") print( F'Given {node_count} nodes, there are {binary_tree_count(node_count)} ' F'binary trees and {catalan_number(node_count)} binary search trees.' )
31
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] _UpperCAmelCase = True for i in range(_SCREAMING_SNAKE_CASE ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: _UpperCAmelCase = True if a[i].islower(): _UpperCAmelCase = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
260
0
from __future__ import annotations def SCREAMING_SNAKE_CASE_ ( __A : list[int] , __A : int ) -> list[list[int]]: """simple docstring""" a_ : list[list[int]] = [] a_ : list[int] = [] a_ : Dict = 0 a_ : Optional[int] = sum(__A ) create_state_space_tree(__A , __A , __A , __A , __A , __A ) return result def SCREAMING_SNAKE_CASE_ ( __A : list[int] , __A : int , __A : int , __A : list[int] , __A : list[list[int]] , __A : int , ) -> None: """simple docstring""" if sum(__A ) > max_sum or (remaining_nums_sum + sum(__A )) < max_sum: return if sum(__A ) == max_sum: result.append(__A ) return for index in range(__A , len(__A ) ): create_state_space_tree( __A , __A , index + 1 , [*path, nums[index]] , __A , remaining_nums_sum - nums[index] , ) UpperCAmelCase_ : str = [3, 34, 4, 12, 5, 2] UpperCAmelCase_ : Optional[Any] = 9 UpperCAmelCase_ : str = generate_sum_of_subsets_soln(nums, max_sum) print(*result)
32
"""simple docstring""" import random def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' _UpperCAmelCase = a[left_index] _UpperCAmelCase = left_index + 1 for j in range(left_index + 1 , _SCREAMING_SNAKE_CASE ): if a[j] < pivot: _UpperCAmelCase , _UpperCAmelCase = a[i], a[j] i += 1 _UpperCAmelCase , _UpperCAmelCase = a[i - 1], a[left_index] return i - 1 def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' if left < right: _UpperCAmelCase = random.randint(_SCREAMING_SNAKE_CASE , right - 1 ) _UpperCAmelCase , _UpperCAmelCase = ( a[left], a[pivot], ) # switches the pivot with the left most bound _UpperCAmelCase = partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) quick_sort_random( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # recursive quicksort to the left of the pivot point quick_sort_random( _SCREAMING_SNAKE_CASE , pivot_index + 1 , _SCREAMING_SNAKE_CASE ) # recursive quicksort to the right of the pivot point def lowercase ( ): '''simple docstring''' _UpperCAmelCase = input('''Enter numbers separated by a comma:\n''' ).strip() _UpperCAmelCase = [int(_SCREAMING_SNAKE_CASE ) for item in user_input.split(''',''' )] quick_sort_random(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) ) print(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
260
0
"""simple docstring""" def lowercase ( __snake_case : list ): for i in range(len(__snake_case ) - 1 , 0 , -1 ): lowercase_ : int = False for j in range(__snake_case , 0 , -1 ): if unsorted[j] < unsorted[j - 1]: lowercase_ , lowercase_ : Dict = unsorted[j - 1], unsorted[j] lowercase_ : List[str] = True for j in range(__snake_case ): if unsorted[j] > unsorted[j + 1]: lowercase_ , lowercase_ : int = unsorted[j + 1], unsorted[j] lowercase_ : Dict = True if not swapped: break return unsorted if __name__ == "__main__": import doctest doctest.testmod() __A : List[str] = input('''Enter numbers separated by a comma:\n''').strip() __A : str = [int(item) for item in user_input.split(''',''')] print(F"""{cocktail_shaker_sort(unsorted) = }""")
33
"""simple docstring""" import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging __A : Union[str, Any] = "\\n\n" __A : Any = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n" __A : List[str] = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _a ( datasets.Metric): """simple docstring""" def lowercase__ ( self : List[Any] )->Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''input_texts''': datasets.Value('''string''' ), } ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , ) def lowercase__ ( self : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : int = 1_6 , __UpperCamelCase : bool = True , __UpperCamelCase : List[Any]=None )->Any: if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." if device == "gpu": _UpperCAmelCase = '''cuda''' else: _UpperCAmelCase = '''cuda''' if torch.cuda.is_available() else '''cpu''' _UpperCAmelCase = AutoModelForCausalLM.from_pretrained(__UpperCamelCase ) _UpperCAmelCase = model.to(__UpperCamelCase ) _UpperCAmelCase = AutoTokenizer.from_pretrained(__UpperCamelCase ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: _UpperCAmelCase = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(__UpperCamelCase ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" _UpperCAmelCase = model.config.max_length - 1 else: _UpperCAmelCase = model.config.max_length _UpperCAmelCase = tokenizer( __UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors='''pt''' , return_attention_mask=__UpperCamelCase , ).to(__UpperCamelCase ) _UpperCAmelCase = encodings['''input_ids'''] _UpperCAmelCase = encodings['''attention_mask'''] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." _UpperCAmelCase = [] _UpperCAmelCase = CrossEntropyLoss(reduction='''none''' ) for start_index in logging.tqdm(range(0 , len(__UpperCamelCase ) , __UpperCamelCase ) ): _UpperCAmelCase = min(start_index + batch_size , len(__UpperCamelCase ) ) _UpperCAmelCase = encoded_texts[start_index:end_index] _UpperCAmelCase = attn_masks[start_index:end_index] if add_start_token: _UpperCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__UpperCamelCase ) _UpperCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) _UpperCAmelCase = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(__UpperCamelCase ), attn_mask] , dim=1 ) _UpperCAmelCase = encoded_batch with torch.no_grad(): _UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase ).logits _UpperCAmelCase = out_logits[..., :-1, :].contiguous() _UpperCAmelCase = labels[..., 1:].contiguous() _UpperCAmelCase = attn_mask[..., 1:].contiguous() _UpperCAmelCase = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , __UpperCamelCase ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(__UpperCamelCase )}
260
0
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _a ( __a ): __a : Optional[Any] = (DEISMultistepScheduler,) __a : Any = (("""num_inference_steps""", 25),) def A ( self : Any , **lowercase : Optional[int] ): '''simple docstring''' UpperCAmelCase = { '''num_train_timesteps''': 1_000, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''solver_order''': 2, } config.update(**lowercase ) return config def A ( self : Union[str, Any] , lowercase : Optional[Any]=0 , **lowercase : Union[str, Any] ): '''simple docstring''' UpperCAmelCase = dict(self.forward_default_kwargs ) UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase ) UpperCAmelCase = self.dummy_sample UpperCAmelCase = 0.1 * sample UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: UpperCAmelCase = self.get_scheduler_config(**lowercase ) UpperCAmelCase = scheduler_class(**lowercase ) scheduler.set_timesteps(lowercase ) # copy over dummy past residuals UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase ) UpperCAmelCase = scheduler_class.from_pretrained(lowercase ) new_scheduler.set_timesteps(lowercase ) # copy over dummy past residuals UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] UpperCAmelCase , UpperCAmelCase = sample, sample for t in range(lowercase , time_step + scheduler.config.solver_order + 1 ): UpperCAmelCase = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample UpperCAmelCase = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def A ( self : int ): '''simple docstring''' pass def A ( self : str , lowercase : Any=0 , **lowercase : Tuple ): '''simple docstring''' UpperCAmelCase = dict(self.forward_default_kwargs ) UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase ) UpperCAmelCase = self.dummy_sample UpperCAmelCase = 0.1 * sample UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: UpperCAmelCase = self.get_scheduler_config() UpperCAmelCase = scheduler_class(**lowercase ) scheduler.set_timesteps(lowercase ) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase ) UpperCAmelCase = scheduler_class.from_pretrained(lowercase ) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase ) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] UpperCAmelCase = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample UpperCAmelCase = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def A ( self : Any , lowercase : List[str]=None , **lowercase : List[Any] ): '''simple docstring''' if scheduler is None: UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config(**lowercase ) UpperCAmelCase = scheduler_class(**lowercase ) UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config(**lowercase ) UpperCAmelCase = scheduler_class(**lowercase ) UpperCAmelCase = 10 UpperCAmelCase = self.dummy_model() UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(lowercase ) for i, t in enumerate(scheduler.timesteps ): UpperCAmelCase = model(lowercase , lowercase ) UpperCAmelCase = scheduler.step(lowercase , lowercase , lowercase ).prev_sample return sample def A ( self : List[str] ): '''simple docstring''' UpperCAmelCase = dict(self.forward_default_kwargs ) UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase ) for scheduler_class in self.scheduler_classes: UpperCAmelCase = self.get_scheduler_config() UpperCAmelCase = scheduler_class(**lowercase ) UpperCAmelCase = self.dummy_sample UpperCAmelCase = 0.1 * sample if num_inference_steps is not None and hasattr(lowercase , '''set_timesteps''' ): scheduler.set_timesteps(lowercase ) elif num_inference_steps is not None and not hasattr(lowercase , '''set_timesteps''' ): UpperCAmelCase = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] UpperCAmelCase = scheduler.timesteps[5] UpperCAmelCase = scheduler.timesteps[6] UpperCAmelCase = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample UpperCAmelCase = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def A ( self : str ): '''simple docstring''' UpperCAmelCase = DEISMultistepScheduler(**self.get_scheduler_config() ) UpperCAmelCase = self.full_loop(scheduler=lowercase ) UpperCAmelCase = torch.mean(torch.abs(lowercase ) ) assert abs(result_mean.item() - 0.2_3916 ) < 1E-3 UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) UpperCAmelCase = self.full_loop(scheduler=lowercase ) UpperCAmelCase = torch.mean(torch.abs(lowercase ) ) assert abs(result_mean.item() - 0.2_3916 ) < 1E-3 def A ( self : Dict ): '''simple docstring''' for timesteps in [25, 50, 100, 999, 1_000]: self.check_over_configs(num_train_timesteps=lowercase ) def A ( self : int ): '''simple docstring''' self.check_over_configs(thresholding=lowercase ) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=lowercase , prediction_type=lowercase , sample_max_value=lowercase , algorithm_type='''deis''' , solver_order=lowercase , solver_type=lowercase , ) def A ( self : Optional[int] ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase ) def A ( self : Tuple ): '''simple docstring''' for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=lowercase , solver_type=lowercase , prediction_type=lowercase , algorithm_type=lowercase , ) UpperCAmelCase = self.full_loop( solver_order=lowercase , solver_type=lowercase , prediction_type=lowercase , algorithm_type=lowercase , ) assert not torch.isnan(lowercase ).any(), "Samples have nan numbers" def A ( self : int ): '''simple docstring''' self.check_over_configs(lower_order_final=lowercase ) self.check_over_configs(lower_order_final=lowercase ) def A ( self : List[Any] ): '''simple docstring''' for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]: self.check_over_forward(num_inference_steps=lowercase , time_step=0 ) def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = self.full_loop() UpperCAmelCase = torch.mean(torch.abs(lowercase ) ) assert abs(result_mean.item() - 0.2_3916 ) < 1E-3 def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' ) UpperCAmelCase = torch.mean(torch.abs(lowercase ) ) assert abs(result_mean.item() - 0.091 ) < 1E-3 def A ( self : List[str] ): '''simple docstring''' UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config(thresholding=lowercase , dynamic_thresholding_ratio=0 ) UpperCAmelCase = scheduler_class(**lowercase ) UpperCAmelCase = 10 UpperCAmelCase = self.dummy_model() UpperCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(lowercase ) for i, t in enumerate(scheduler.timesteps ): UpperCAmelCase = model(lowercase , lowercase ) UpperCAmelCase = scheduler.step(lowercase , lowercase , lowercase ).prev_sample assert sample.dtype == torch.floataa
34
"""simple docstring""" import pytest import datasets # Import fixture modules as plugins __A : int = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"] def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' for item in items: if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ): continue item.add_marker(pytest.mark.unit ) def lowercase ( _SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' _UpperCAmelCase = tmp_path_factory.getbasetemp() / '''cache''' _UpperCAmelCase = test_hf_cache_home / '''datasets''' _UpperCAmelCase = test_hf_cache_home / '''metrics''' _UpperCAmelCase = test_hf_cache_home / '''modules''' monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = test_hf_datasets_cache / '''downloads''' monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = test_hf_datasets_cache / '''downloads''' / '''extracted''' monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_SCREAMING_SNAKE_CASE ) ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE , scope='''session''' ) def lowercase ( ): '''simple docstring''' datasets.disable_progress_bar() @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , _SCREAMING_SNAKE_CASE ) @pytest.fixture def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , _SCREAMING_SNAKE_CASE )
260
0
'''simple docstring''' import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() __a = logging.get_logger(__name__) __a = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } __a = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: for attribute in key.split(""".""" ): snake_case__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase ) if weight_type is not None: snake_case__ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape else: snake_case__ : Union[str, Any] = hf_pointer.shape assert hf_shape == value.shape, ( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": snake_case__ : int = value elif weight_type == "weight_g": snake_case__ : List[str] = value elif weight_type == "weight_v": snake_case__ : List[str] = value elif weight_type == "bias": snake_case__ : Optional[Any] = value else: snake_case__ : str = value logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Any: snake_case__ : Union[str, Any] = [] snake_case__ : Dict = fairseq_model.state_dict() snake_case__ : List[Any] = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight snake_case__ : Optional[int] = None for name, value in fairseq_dict.items(): snake_case__ : List[Any] = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == """group""" , ) snake_case__ : Union[str, Any] = True elif name.split(""".""" )[0] == "proj": snake_case__ : Tuple = fairseq_model.proj snake_case__ : int = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: snake_case__ : Optional[Any] = True if "*" in mapped_key: snake_case__ : Optional[int] = name.split(_lowerCAmelCase )[0].split(""".""" )[-2] snake_case__ : Tuple = mapped_key.replace("""*""" , _lowerCAmelCase ) if "weight_g" in name: snake_case__ : str = """weight_g""" elif "weight_v" in name: snake_case__ : int = """weight_v""" elif "bias" in name: snake_case__ : Dict = """bias""" elif "weight" in name: snake_case__ : Union[str, Any] = """weight""" else: snake_case__ : Union[str, Any] = None set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) continue if not is_used: unused_weights.append(_lowerCAmelCase ) logger.warning(f"Unused weights: {unused_weights}" ) return proj_weight def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: snake_case__ : int = full_name.split("""conv_layers.""" )[-1] snake_case__ : Dict = name.split(""".""" ) snake_case__ : Any = int(items[0] ) snake_case__ : Optional[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) snake_case__ : int = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) snake_case__ : str = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) snake_case__ : Union[str, Any] = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) snake_case__ : int = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(_lowerCAmelCase ) def __snake_case( _lowerCAmelCase ) -> List[str]: snake_case__ , snake_case__ : str = emb.weight.shape snake_case__ : List[str] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase ) snake_case__ : List[str] = emb.weight.data return lin_layer def __snake_case( _lowerCAmelCase ) -> Optional[Any]: with open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as f: snake_case__ : int = f.readlines() snake_case__ : List[Any] = [line.split(""" """ )[0] for line in lines] snake_case__ : Union[str, Any] = len(_lowerCAmelCase ) snake_case__ : Any = { """<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3, } vocab_dict.update(dict(zip(_lowerCAmelCase , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> int: snake_case__ : Optional[Any] = WavaVecaConfig.from_pretrained(_lowerCAmelCase ) snake_case__ : Optional[Any] = SpeechaTextaConfig.from_pretrained( _lowerCAmelCase , vocab_size=_lowerCAmelCase , decoder_layers=_lowerCAmelCase , do_stable_layer_norm=_lowerCAmelCase ) snake_case__ : Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ) snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) snake_case__ : Tuple = model[0].eval() # set weights for wav2vec2 encoder snake_case__ : Optional[Any] = WavaVecaModel(_lowerCAmelCase ) snake_case__ : Dict = recursively_load_weights_wavaveca(model.encoder , _lowerCAmelCase ) snake_case__ : Optional[Any] = SpeechaTextaForCausalLM(_lowerCAmelCase ) snake_case__ , snake_case__ : Tuple = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowerCAmelCase ) # set output linear layer unexpected_keys.remove("""embed_out""" ) snake_case__ : Tuple = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}" ) logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" ) snake_case__ : List[Any] = SpeechEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase ) snake_case__ : Tuple = False # add projection layer snake_case__ : Union[str, Any] = nn.Parameter(projection_layer.weight ) snake_case__ : int = nn.Parameter(projection_layer.bias ) snake_case__ : Tuple = create_vocab_dict(_lowerCAmelCase ) with open(os.path.join(_lowerCAmelCase , """vocab.json""" ) , """w""" ) as fp: json.dump(_lowerCAmelCase , _lowerCAmelCase ) snake_case__ : Tuple = SpeechaTextaTokenizer(os.path.join(_lowerCAmelCase , """vocab.json""" ) ) tokenizer.save_pretrained(_lowerCAmelCase ) snake_case__ : Optional[Any] = hf_wavavec.config.to_dict() snake_case__ : Tuple = tokenizer.pad_token_id snake_case__ : Optional[Any] = tokenizer.bos_token_id snake_case__ : int = tokenizer.eos_token_id snake_case__ : str = """speech_to_text_2""" snake_case__ : List[Any] = """wav2vec2""" snake_case__ : List[str] = SpeechEncoderDecoderConfig.from_dict(_lowerCAmelCase ) hf_wavavec.save_pretrained(_lowerCAmelCase ) feature_extractor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument( "--encoder_config_path", default="facebook/wav2vec2-large-lv60", type=str, help="Path to hf encoder wav2vec2 checkpoint config", ) parser.add_argument( "--decoder_config_path", default="facebook/s2t-small-mustc-en-fr-st", type=str, help="Path to hf decoder s2t checkpoint config", ) parser.add_argument("--vocab_size", default=1_0224, type=int, help="Vocab size of decoder") parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers") __a = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
35
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : list ): '''simple docstring''' if len(_SCREAMING_SNAKE_CASE ) <= 1: return lst _UpperCAmelCase = 1 while i < len(_SCREAMING_SNAKE_CASE ): if lst[i - 1] <= lst[i]: i += 1 else: _UpperCAmelCase , _UpperCAmelCase = lst[i], lst[i - 1] i -= 1 if i == 0: _UpperCAmelCase = 1 return lst if __name__ == "__main__": __A : Dict = input("Enter numbers separated by a comma:\n").strip() __A : List[Any] = [int(item) for item in user_input.split(",")] print(gnome_sort(unsorted))
260
0
import inspect import os import re from transformers.configuration_utils import PretrainedConfig from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py _snake_case = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. _snake_case = direct_transformers_import(PATH_TO_TRANSFORMERS) _snake_case = transformers.models.auto.configuration_auto.CONFIG_MAPPING _snake_case = { # used to compute the property `self.chunk_length` "EncodecConfig": ["overlap"], # used as `self.bert_model = BertModel(config, ...)` "DPRConfig": True, # not used in modeling files, but it's an important information "FSMTConfig": ["langs"], # used internally in the configuration class file "GPTNeoConfig": ["attention_types"], # used internally in the configuration class file "EsmConfig": ["is_folding_model"], # used during training (despite we don't have training script for these models yet) "Mask2FormerConfig": ["ignore_value"], # `ignore_value` used during training (despite we don't have training script for these models yet) # `norm` used in conversion script (despite not using in the modeling file) "OneFormerConfig": ["ignore_value", "norm"], # used during preprocessing and collation, see `collating_graphormer.py` "GraphormerConfig": ["spatial_pos_max"], # used internally in the configuration class file "T5Config": ["feed_forward_proj"], # used internally in the configuration class file # `tokenizer_class` get default value `T5Tokenizer` intentionally "MT5Config": ["feed_forward_proj", "tokenizer_class"], "UMT5Config": ["feed_forward_proj", "tokenizer_class"], # used internally in the configuration class file "LongT5Config": ["feed_forward_proj"], # used internally in the configuration class file "SwitchTransformersConfig": ["feed_forward_proj"], # having default values other than `1e-5` - we can't fix them without breaking "BioGptConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "GLPNConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "SegformerConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "CvtConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "PerceiverConfig": ["layer_norm_eps"], # used internally to calculate the feature size "InformerConfig": ["num_static_real_features", "num_time_features"], # used internally to calculate the feature size "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"], # used internally to calculate the feature size "AutoformerConfig": ["num_static_real_features", "num_time_features"], # used internally to calculate `mlp_dim` "SamVisionConfig": ["mlp_ratio"], # For (head) training, but so far not implemented "ClapAudioConfig": ["num_classes"], # Not used, but providing useful information to users "SpeechT5HifiGanConfig": ["sampling_rate"], } # TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure SPECIAL_CASES_TO_ALLOW.update( { "CLIPSegConfig": True, "DeformableDetrConfig": True, "DetaConfig": True, "DinatConfig": True, "DonutSwinConfig": True, "EfficientFormerConfig": True, "FSMTConfig": True, "JukeboxConfig": True, "LayoutLMv2Config": True, "MaskFormerSwinConfig": True, "MT5Config": True, "NatConfig": True, "OneFormerConfig": True, "PerceiverConfig": True, "RagConfig": True, "SpeechT5Config": True, "SwinConfig": True, "Swin2SRConfig": True, "Swinv2Config": True, "SwitchTransformersConfig": True, "TableTransformerConfig": True, "TapasConfig": True, "TransfoXLConfig": True, "UniSpeechConfig": True, "UniSpeechSatConfig": True, "WavLMConfig": True, "WhisperConfig": True, # TODO: @Arthur (for `alignment_head` and `alignment_layer`) "JukeboxPriorConfig": True, # TODO: @Younes (for `is_decoder`) "Pix2StructTextConfig": True, } ) def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : Any = False for attribute in attributes: for modeling_source in source_strings: # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)` if ( F"config.{attribute}" in modeling_source or F"getattr(config, \"{attribute}\"" in modeling_source or F"getattr(self.config, \"{attribute}\"" in modeling_source ): _lowerCAmelCase : Any = True # Deal with multi-line cases elif ( re.search( rF"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"" , _lowerCamelCase , ) is not None ): _lowerCAmelCase : Union[str, Any] = True # `SequenceSummary` is called with `SequenceSummary(config)` elif attribute in [ "summary_type", "summary_use_proj", "summary_activation", "summary_last_dropout", "summary_proj_to_labels", "summary_first_dropout", ]: if "SequenceSummary" in modeling_source: _lowerCAmelCase : Optional[int] = True if attribute_used: break if attribute_used: break # common and important attributes, even if they do not always appear in the modeling files _lowerCAmelCase : Tuple = [ "bos_index", "eos_index", "pad_index", "unk_index", "mask_index", "image_size", "use_cache", "out_features", "out_indices", ] _lowerCAmelCase : List[str] = ["encoder_no_repeat_ngram_size"] # Special cases to be allowed _lowerCAmelCase : Dict = True if not attribute_used: _lowerCAmelCase : List[str] = False for attribute in attributes: # Allow if the default value in the configuration class is different from the one in `PretrainedConfig` if attribute in ["is_encoder_decoder"] and default_value is True: _lowerCAmelCase : Tuple = True elif attribute in ["tie_word_embeddings"] and default_value is False: _lowerCAmelCase : Any = True # Allow cases without checking the default value in the configuration class elif attribute in attributes_to_allow + attributes_used_in_generation: _lowerCAmelCase : Dict = True elif attribute.endswith("_token_id" ): _lowerCAmelCase : Any = True # configuration class specific cases if not case_allowed: _lowerCAmelCase : Optional[int] = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] ) _lowerCAmelCase : Optional[Any] = allowed_cases is True or attribute in allowed_cases return attribute_used or case_allowed def A ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : int = dict(inspect.signature(config_class.__init__ ).parameters ) _lowerCAmelCase : List[Any] = [x for x in list(signature.keys() ) if x not in ["self", "kwargs"]] _lowerCAmelCase : List[Any] = [signature[param].default for param in parameter_names] # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long # as one variant is used, the test should pass _lowerCAmelCase : Optional[Any] = {} if len(config_class.attribute_map ) > 0: _lowerCAmelCase : Any = {v: k for k, v in config_class.attribute_map.items()} # Get the path to modeling source files _lowerCAmelCase : Optional[Any] = inspect.getsourcefile(_lowerCamelCase ) _lowerCAmelCase : str = os.path.dirname(_lowerCamelCase ) # Let's check against all frameworks: as long as one framework uses an attribute, we are good. _lowerCAmelCase : Any = [os.path.join(_lowerCamelCase , _lowerCamelCase ) for fn in os.listdir(_lowerCamelCase ) if fn.startswith("modeling_" )] # Get the source code strings _lowerCAmelCase : Union[str, Any] = [] for path in modeling_paths: if os.path.isfile(_lowerCamelCase ): with open(_lowerCamelCase ) as fp: modeling_sources.append(fp.read() ) _lowerCAmelCase : List[str] = [] for config_param, default_value in zip(_lowerCamelCase , _lowerCamelCase ): # `attributes` here is all the variant names for `config_param` _lowerCAmelCase : int = [config_param] # some configuration classes have non-empty `attribute_map`, and both names could be used in the # corresponding modeling files. As long as one of them appears, it is fine. if config_param in reversed_attribute_map: attributes.append(reversed_attribute_map[config_param] ) if not check_attribute_being_used(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): unused_attributes.append(attributes[0] ) return sorted(_lowerCamelCase ) def A ( ): '''simple docstring''' _lowerCAmelCase : int = {} for _config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in _config_class.__module__: continue # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.) _lowerCAmelCase : Tuple = [ cls for name, cls in inspect.getmembers( inspect.getmodule(_config_class ) , lambda _lowerCamelCase : inspect.isclass(_lowerCamelCase ) and issubclass(_lowerCamelCase , _lowerCamelCase ) and inspect.getmodule(_lowerCamelCase ) == inspect.getmodule(_config_class ) , ) ] for config_class in config_classes_in_module: _lowerCAmelCase : Optional[int] = check_config_attributes_being_used(_lowerCamelCase ) if len(_lowerCamelCase ) > 0: _lowerCAmelCase : Dict = unused_attributes if len(_lowerCamelCase ) > 0: _lowerCAmelCase : Dict = "The following configuration classes contain unused attributes in the corresponding modeling files:\n" for name, attributes in configs_with_unused_attributes.items(): error += F"{name}: {attributes}\n" raise ValueError(_lowerCamelCase ) if __name__ == "__main__": check_config_attributes()
36
"""simple docstring""" import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version __A : int = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt") def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : int = 1_6000 ): '''simple docstring''' _UpperCAmelCase = int(round(sample_rate * max_length ) ) if len(_SCREAMING_SNAKE_CASE ) <= sample_length: return wav _UpperCAmelCase = randint(0 , len(_SCREAMING_SNAKE_CASE ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class _a : """simple docstring""" UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """Name of a dataset from the datasets package"""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """A file containing the training audio paths and labels."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """A file containing the validation audio paths and labels."""}) UpperCamelCase__ = field( default="""train""" , metadata={ """help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'""" } , ) UpperCamelCase__ = field( default="""validation""" , metadata={ """help""": ( """The name of the training data set split to use (via the datasets library). Defaults to 'validation'""" ) } , ) UpperCamelCase__ = field( default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , ) UpperCamelCase__ = field( default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. Defaults to 'label'"""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) UpperCamelCase__ = field( default=20 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = field( default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""}) UpperCamelCase__ = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Name or path of preprocessor config."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , ) def lowercase__ ( self : Optional[Any] )->int: if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( '''The argument `--freeze_feature_extractor` is deprecated and ''' '''will be removed in a future version. Use `--freeze_feature_encoder`''' '''instead. Setting `freeze_feature_encoder==True`.''' , __UpperCamelCase , ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( '''The argument `--freeze_feature_extractor` is deprecated and ''' '''should not be used in combination with `--freeze_feature_encoder`.''' '''Only make use of `--freeze_feature_encoder`.''' ) def lowercase ( ): '''simple docstring''' _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_audio_classification''' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(_SCREAMING_SNAKE_CASE ) transformers.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} ' + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. _UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. ' '''Use --overwrite_output_dir to train from scratch.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset and prepare it for the audio classification task. _UpperCAmelCase = DatasetDict() _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( f'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. ' '''Make sure to set `--audio_column_name` to the correct audio column - one of ''' f'{", ".join(raw_datasets["train"].column_names )}.' ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( f'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. ' '''Make sure to set `--label_column_name` to the correct text column - one of ''' f'{", ".join(raw_datasets["train"].column_names )}.' ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy _UpperCAmelCase = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. _UpperCAmelCase = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) _UpperCAmelCase = feature_extractor.model_input_names[0] def train_transforms(_SCREAMING_SNAKE_CASE : Tuple ): _UpperCAmelCase = [] for audio in batch[data_args.audio_column_name]: _UpperCAmelCase = random_subsample( audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=feature_extractor.sampling_rate ) _UpperCAmelCase = {model_input_name: inputs.get(_SCREAMING_SNAKE_CASE )} _UpperCAmelCase = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(_SCREAMING_SNAKE_CASE : Optional[int] ): _UpperCAmelCase = [audio['''array'''] for audio in batch[data_args.audio_column_name]] _UpperCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=feature_extractor.sampling_rate ) _UpperCAmelCase = {model_input_name: inputs.get(_SCREAMING_SNAKE_CASE )} _UpperCAmelCase = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. _UpperCAmelCase = raw_datasets['''train'''].features[data_args.label_column_name].names _UpperCAmelCase , _UpperCAmelCase = {}, {} for i, label in enumerate(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = str(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = label # Load the accuracy metric from the datasets package _UpperCAmelCase = evaluate.load('''accuracy''' ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. def compute_metrics(_SCREAMING_SNAKE_CASE : List[str] ): _UpperCAmelCase = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=_SCREAMING_SNAKE_CASE , references=eval_pred.label_ids ) _UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(_SCREAMING_SNAKE_CASE ) , labelaid=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _UpperCAmelCase = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: _UpperCAmelCase = ( raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(_SCREAMING_SNAKE_CASE , output_all_columns=_SCREAMING_SNAKE_CASE ) if training_args.do_eval: if data_args.max_eval_samples is not None: _UpperCAmelCase = ( raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(_SCREAMING_SNAKE_CASE , output_all_columns=_SCREAMING_SNAKE_CASE ) # Initialize our trainer _UpperCAmelCase = Trainer( model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , ) # Training if training_args.do_train: _UpperCAmelCase = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase = last_checkpoint _UpperCAmelCase = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _UpperCAmelCase = trainer.evaluate() trainer.log_metrics('''eval''' , _SCREAMING_SNAKE_CASE ) trainer.save_metrics('''eval''' , _SCREAMING_SNAKE_CASE ) # Write model card and (optionally) push to hub _UpperCAmelCase = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''audio-classification''', '''dataset''': data_args.dataset_name, '''tags''': ['''audio-classification'''], } if training_args.push_to_hub: trainer.push_to_hub(**_SCREAMING_SNAKE_CASE ) else: trainer.create_model_card(**_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
260
0
'''simple docstring''' import torch from diffusers import DiffusionPipeline class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Tuple: super().__init__() self.register_modules(unet=__UpperCAmelCase ,scheduler=__UpperCAmelCase ) def __call__( self ) -> str: lowerCAmelCase__ : Tuple = torch.randn( (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) ,) lowerCAmelCase__ : Union[str, Any] = 1 lowerCAmelCase__ : Union[str, Any] = self.unet(__UpperCAmelCase ,__UpperCAmelCase ).sample lowerCAmelCase__ : List[Any] = self.scheduler.step(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ).prev_sample lowerCAmelCase__ : Optional[int] = scheduler_output - scheduler_output + torch.ones_like(__UpperCAmelCase ) return result
37
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = (DPMSolverSinglestepScheduler,) UpperCamelCase__ = (("""num_inference_steps""", 25),) def lowercase__ ( self : Tuple , **__UpperCamelCase : Tuple )->Any: _UpperCAmelCase = { '''num_train_timesteps''': 1_0_0_0, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''prediction_type''': '''epsilon''', '''thresholding''': False, '''sample_max_value''': 1.0, '''algorithm_type''': '''dpmsolver++''', '''solver_type''': '''midpoint''', '''lambda_min_clipped''': -float('''inf''' ), '''variance_type''': None, } config.update(**__UpperCamelCase ) return config def lowercase__ ( self : Dict , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : Optional[int] )->Tuple: _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase , _UpperCAmelCase = sample, sample for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ): _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase__ ( self : Any )->Union[str, Any]: pass def lowercase__ ( self : str , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : List[Any] )->Dict: _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config() _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residual (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase__ ( self : int , __UpperCamelCase : List[str]=None , **__UpperCamelCase : Optional[int] )->List[Any]: if scheduler is None: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 1_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample return sample def lowercase__ ( self : List[Any] )->Dict: _UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = 5_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_5_7_4 ) < 1e-3 def lowercase__ ( self : Dict )->Dict: for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__UpperCamelCase ) def lowercase__ ( self : str )->Optional[Any]: # make sure that iterating over schedulers with same config names gives same results # for defaults _UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 _UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 def lowercase__ ( self : Union[str, Any] )->int: self.check_over_configs(thresholding=__UpperCamelCase ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , algorithm_type='''dpmsolver++''' , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , ) def lowercase__ ( self : str )->str: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCamelCase ) def lowercase__ ( self : List[Any] )->Tuple: for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , ) _UpperCAmelCase = self.full_loop( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , ) assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers" def lowercase__ ( self : Dict )->List[str]: self.check_over_configs(lower_order_final=__UpperCamelCase ) self.check_over_configs(lower_order_final=__UpperCamelCase ) def lowercase__ ( self : Dict )->str: self.check_over_configs(lambda_min_clipped=-float('''inf''' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def lowercase__ ( self : List[str] )->int: self.check_over_configs(variance_type=__UpperCamelCase ) self.check_over_configs(variance_type='''learned_range''' ) def lowercase__ ( self : List[str] )->Union[str, Any]: for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 ) def lowercase__ ( self : List[Any] )->int: _UpperCAmelCase = self.full_loop() _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 def lowercase__ ( self : List[str] )->List[str]: _UpperCAmelCase = self.full_loop(use_karras_sigmas=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_2_4_8 ) < 1e-3 def lowercase__ ( self : int )->List[Any]: _UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.1_4_5_3 ) < 1e-3 def lowercase__ ( self : Optional[Any] )->Dict: _UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.0_6_4_9 ) < 1e-3 def lowercase__ ( self : Union[str, Any] )->List[str]: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 1_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample assert sample.dtype == torch.floataa
260
0
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : str = logging.get_logger(__name__) UpperCAmelCase_ : int = { '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''', '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''', } class _SCREAMING_SNAKE_CASE ( _a ): snake_case__ : Tuple = """markuplm""" def __init__( self : Union[str, Any] , __lowerCamelCase : List[Any]=30_522 , __lowerCamelCase : Union[str, Any]=768 , __lowerCamelCase : Dict=12 , __lowerCamelCase : Optional[int]=12 , __lowerCamelCase : Optional[int]=3_072 , __lowerCamelCase : Optional[int]="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : Any=512 , __lowerCamelCase : int=2 , __lowerCamelCase : Tuple=0.02 , __lowerCamelCase : Any=1E-12 , __lowerCamelCase : Any=0 , __lowerCamelCase : Union[str, Any]=0 , __lowerCamelCase : Union[str, Any]=2 , __lowerCamelCase : List[str]=256 , __lowerCamelCase : Union[str, Any]=1_024 , __lowerCamelCase : Any=216 , __lowerCamelCase : Optional[Any]=1_001 , __lowerCamelCase : Tuple=32 , __lowerCamelCase : Dict=50 , __lowerCamelCase : Optional[int]="absolute" , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : int=None , **__lowerCamelCase : str , ): super().__init__( pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase , ) UpperCamelCase :str = vocab_size UpperCamelCase :List[str] = hidden_size UpperCamelCase :int = num_hidden_layers UpperCamelCase :Dict = num_attention_heads UpperCamelCase :int = hidden_act UpperCamelCase :Optional[int] = intermediate_size UpperCamelCase :List[Any] = hidden_dropout_prob UpperCamelCase :Any = attention_probs_dropout_prob UpperCamelCase :Union[str, Any] = max_position_embeddings UpperCamelCase :Optional[Any] = type_vocab_size UpperCamelCase :Union[str, Any] = initializer_range UpperCamelCase :List[str] = layer_norm_eps UpperCamelCase :Optional[Any] = position_embedding_type UpperCamelCase :Optional[Any] = use_cache UpperCamelCase :Any = classifier_dropout # additional properties UpperCamelCase :Dict = max_depth UpperCamelCase :List[Any] = max_xpath_tag_unit_embeddings UpperCamelCase :List[Any] = max_xpath_subs_unit_embeddings UpperCamelCase :Dict = tag_pad_id UpperCamelCase :Optional[Any] = subs_pad_id UpperCamelCase :Any = xpath_unit_hidden_size
38
"""simple docstring""" from __future__ import annotations from math import pi from typing import Protocol import matplotlib.pyplot as plt import numpy as np class _a ( lowerCAmelCase): """simple docstring""" def lowercase__ ( self : List[Any] , __UpperCamelCase : float )->float: return 0.0 def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] ) _UpperCAmelCase = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] ) return lowest, highest def lowercase ( _SCREAMING_SNAKE_CASE : FilterType , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = 512 _UpperCAmelCase = [1] + [0] * (size - 1) _UpperCAmelCase = [filter_type.process(_SCREAMING_SNAKE_CASE ) for item in inputs] _UpperCAmelCase = [0] * (samplerate - size) # zero-padding outputs += filler _UpperCAmelCase = np.abs(np.fft.fft(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = 20 * np.logaa(_SCREAMING_SNAKE_CASE ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) # Display within reasonable bounds _UpperCAmelCase = get_bounds(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) ) plt.ylabel('''Gain (dB)''' ) plt.plot(_SCREAMING_SNAKE_CASE ) plt.show() def lowercase ( _SCREAMING_SNAKE_CASE : FilterType , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = 512 _UpperCAmelCase = [1] + [0] * (size - 1) _UpperCAmelCase = [filter_type.process(_SCREAMING_SNAKE_CASE ) for item in inputs] _UpperCAmelCase = [0] * (samplerate - size) # zero-padding outputs += filler _UpperCAmelCase = np.angle(np.fft.fft(_SCREAMING_SNAKE_CASE ) ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) plt.ylim(-2 * pi , 2 * pi ) plt.ylabel('''Phase shift (Radians)''' ) plt.plot(np.unwrap(_SCREAMING_SNAKE_CASE , -2 * pi ) ) plt.show()
260
0
from __future__ import annotations from collections.abc import Generator import requests from bsa import BeautifulSoup _a = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l=''' def __A ( __lowerCAmelCase = "mumbai" )-> Generator[tuple[str, str], None, None]: """simple docstring""" _UpperCAmelCase = BeautifulSoup(requests.get(url + location ).content , 'html.parser' ) # This attribute finds out all the specifics listed in a job for job in soup.find_all('div' , attrs={'data-tn-component': 'organicJob'} ): _UpperCAmelCase = job.find('a' , attrs={'data-tn-element': 'jobTitle'} ).text.strip() _UpperCAmelCase = job.find('span' , {'class': 'company'} ).text.strip() yield job_title, company_name if __name__ == "__main__": for i, job in enumerate(fetch_jobs('''Bangalore'''), 1): print(F'''Job {i:>2} is {job[0]} at {job[1]}''')
39
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A : Union[str, Any] = logging.get_logger(__name__) __A : Dict = { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json", "umberto-commoncrawl-cased-v1": ( "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json" ), "umberto-wikipedia-uncased-v1": ( "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json" ), } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """camembert""" def __init__( self : List[str] , __UpperCamelCase : Union[str, Any]=3_0_5_2_2 , __UpperCamelCase : Optional[Any]=7_6_8 , __UpperCamelCase : Optional[int]=1_2 , __UpperCamelCase : Union[str, Any]=1_2 , __UpperCamelCase : List[Any]=3_0_7_2 , __UpperCamelCase : Dict="gelu" , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : int=0.1 , __UpperCamelCase : int=5_1_2 , __UpperCamelCase : Dict=2 , __UpperCamelCase : int=0.0_2 , __UpperCamelCase : int=1e-12 , __UpperCamelCase : Optional[Any]=1 , __UpperCamelCase : Dict=0 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : Any="absolute" , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : str=None , **__UpperCamelCase : Optional[Any] , )->str: super().__init__(pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = position_embedding_type _UpperCAmelCase = use_cache _UpperCAmelCase = classifier_dropout class _a ( lowerCAmelCase): """simple docstring""" @property def lowercase__ ( self : int )->Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _UpperCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
260
0
"""simple docstring""" import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class _A : """simple docstring""" def __init__( self : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int , __UpperCAmelCase : int): if dst_width < 0 or dst_height < 0: raise ValueError("Destination width/height should be > 0") a : Union[str, Any] = img a : Optional[int] = img.shape[1] a : Dict = img.shape[0] a : Dict = dst_width a : Any = dst_height a : str = self.src_w / self.dst_w a : Dict = self.src_h / self.dst_h a : Dict = ( np.ones((self.dst_h, self.dst_w, 3) , np.uinta) * 255 ) def __snake_case ( self : Tuple): for i in range(self.dst_h): for j in range(self.dst_w): a : List[str] = self.img[self.get_y(__UpperCAmelCase)][self.get_x(__UpperCAmelCase)] def __snake_case ( self : Dict , __UpperCAmelCase : int): return int(self.ratio_x * x) def __snake_case ( self : Optional[int] , __UpperCAmelCase : int): return int(self.ratio_y * y) if __name__ == "__main__": __lowercase , __lowercase = 800, 600 __lowercase = imread("""image_data/lena.jpg""", 1) __lowercase = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output ) waitKey(0) destroyAllWindows()
40
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A : Tuple = logging.get_logger(__name__) __A : List[str] = { "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """poolformer""" def __init__( self : List[str] , __UpperCamelCase : int=3 , __UpperCamelCase : List[Any]=1_6 , __UpperCamelCase : str=1_6 , __UpperCamelCase : List[Any]=3 , __UpperCamelCase : int=4.0 , __UpperCamelCase : str=[2, 2, 6, 2] , __UpperCamelCase : Tuple=[6_4, 1_2_8, 3_2_0, 5_1_2] , __UpperCamelCase : int=[7, 3, 3, 3] , __UpperCamelCase : str=[4, 2, 2, 2] , __UpperCamelCase : Union[str, Any]=[2, 1, 1, 1] , __UpperCamelCase : List[str]=4 , __UpperCamelCase : List[str]=0.0 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : List[str]=True , __UpperCamelCase : Union[str, Any]=1e-5 , __UpperCamelCase : str=0.0_2 , **__UpperCamelCase : List[Any] , )->Dict: _UpperCAmelCase = num_channels _UpperCAmelCase = patch_size _UpperCAmelCase = stride _UpperCAmelCase = padding _UpperCAmelCase = pool_size _UpperCAmelCase = hidden_sizes _UpperCAmelCase = mlp_ratio _UpperCAmelCase = depths _UpperCAmelCase = patch_sizes _UpperCAmelCase = strides _UpperCAmelCase = num_encoder_blocks _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_layer_scale _UpperCAmelCase = layer_scale_init_value _UpperCAmelCase = initializer_range super().__init__(**__UpperCamelCase ) class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = version.parse("""1.11""") @property def lowercase__ ( self : Union[str, Any] )->Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowercase__ ( self : Tuple )->float: return 2e-3
260
0
'''simple docstring''' from datetime import datetime as dt import os from github import Github _A : List[str] =[ '''good first issue''', '''good second issue''', '''good difficult issue''', '''feature request''', '''new model''', '''wip''', ] def SCREAMING_SNAKE_CASE_ () -> Any: lowerCamelCase__ : Tuple = Github(os.environ["""GITHUB_TOKEN"""] ) lowerCamelCase__ : Tuple = g.get_repo("""huggingface/transformers""" ) lowerCamelCase__ : Union[str, Any] = repo.get_issues(state="""open""" ) for issue in open_issues: lowerCamelCase__ : List[str] = sorted([comment for comment in issue.get_comments()] , key=lambda UpperCamelCase : i.created_at , reverse=UpperCamelCase ) lowerCamelCase__ : Union[str, Any] = comments[0] if len(UpperCamelCase ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") issue.edit(state="""closed""" ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would add stale comment to {issue.number}") issue.create_comment( """This issue has been automatically marked as stale because it has not had """ """recent activity. If you think this still needs to be addressed """ """please comment on this thread.\n\nPlease note that issues that do not follow the """ """[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """ """are likely to be ignored.""" ) if __name__ == "__main__": main()
41
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __A : Union[str, Any] = 16 __A : Optional[Any] = 32 def lowercase ( _SCREAMING_SNAKE_CASE : Accelerator , _SCREAMING_SNAKE_CASE : int = 16 ): '''simple docstring''' _UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' ) _UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(_SCREAMING_SNAKE_CASE : Optional[int] ): # max_length=None => use the model max length (it's actually the default) _UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _UpperCAmelCase = datasets.map( _SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(_SCREAMING_SNAKE_CASE : List[str] ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCAmelCase = 16 elif accelerator.mixed_precision != "no": _UpperCAmelCase = 8 else: _UpperCAmelCase = None return tokenizer.pad( _SCREAMING_SNAKE_CASE , padding='''longest''' , max_length=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' , ) # Instantiate dataloaders. _UpperCAmelCase = DataLoader( tokenized_datasets['''train'''] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = DataLoader( tokenized_datasets['''validation'''] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders __A : Optional[int] = mocked_dataloaders # noqa: F811 def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , _SCREAMING_SNAKE_CASE ) == "1": _UpperCAmelCase = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: _UpperCAmelCase = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir ) else: _UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCAmelCase = config['''lr'''] _UpperCAmelCase = int(config['''num_epochs'''] ) _UpperCAmelCase = int(config['''seed'''] ) _UpperCAmelCase = int(config['''batch_size'''] ) set_seed(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase , _UpperCAmelCase = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation _UpperCAmelCase = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE _UpperCAmelCase = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=_SCREAMING_SNAKE_CASE ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCAmelCase = model.to(accelerator.device ) # Instantiate optimizer _UpperCAmelCase = AdamW(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE ) # Instantiate scheduler _UpperCAmelCase = get_linear_schedule_with_warmup( optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(_SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # New Code # # We need to initialize the trackers we use. Overall configurations can also be stored if args.with_tracking: _UpperCAmelCase = os.path.split(_SCREAMING_SNAKE_CASE )[-1].split('''.''' )[0] accelerator.init_trackers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Now we train the model for epoch in range(_SCREAMING_SNAKE_CASE ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: _UpperCAmelCase = 0 for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() _UpperCAmelCase = loss / gradient_accumulation_steps accelerator.backward(_SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.logits.argmax(dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'epoch {epoch}:' , _SCREAMING_SNAKE_CASE ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { '''accuracy''': eval_metric['''accuracy'''], '''f1''': eval_metric['''f1'''], '''train_loss''': total_loss.item() / len(_SCREAMING_SNAKE_CASE ), '''epoch''': epoch, } , step=_SCREAMING_SNAKE_CASE , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def lowercase ( ): '''simple docstring''' _UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) parser.add_argument( '''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , ) parser.add_argument( '''--project_dir''' , type=_SCREAMING_SNAKE_CASE , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
260
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowercase : str = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Any = [ "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST", "UniSpeechForCTC", "UniSpeechForPreTraining", "UniSpeechForSequenceClassification", "UniSpeechModel", "UniSpeechPreTrainedModel", ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
42
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : set ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ), len(grid[0] ) if ( min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) _UpperCAmelCase = 0 count += depth_first_search(_SCREAMING_SNAKE_CASE , row + 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , row - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col + 1 , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col - 1 , _SCREAMING_SNAKE_CASE ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
260
0
from ..utils import DummyObject, requires_backends class lowerCamelCase_ ( metaclass=UpperCAmelCase_ ): '''simple docstring''' a__ : List[str] = ["""keras_nlp"""] def __init__( self , *__lowercase , **__lowercase) -> Tuple: requires_backends(self , ['''keras_nlp'''])
43
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() ) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' _UpperCAmelCase = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue _UpperCAmelCase = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' ) _UpperCAmelCase = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' ) _UpperCAmelCase = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' ) _UpperCAmelCase = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' ) _UpperCAmelCase = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' ) _UpperCAmelCase = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' ) _UpperCAmelCase = key.replace('''image_encoder.module''' , '''flava.image_model''' ) _UpperCAmelCase = key.replace('''text_encoder.module''' , '''flava.text_model''' ) _UpperCAmelCase = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' ) _UpperCAmelCase = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' ) _UpperCAmelCase = key.replace('''text_projection''' , '''flava.text_projection''' ) _UpperCAmelCase = key.replace('''image_projection''' , '''flava.image_projection''' ) _UpperCAmelCase = value.float() for key, value in codebook_state_dict.items(): _UpperCAmelCase = value return upgrade @torch.no_grad() def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int]=None ): '''simple docstring''' if config_path is not None: _UpperCAmelCase = FlavaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = FlavaConfig() _UpperCAmelCase = FlavaForPreTraining(_SCREAMING_SNAKE_CASE ).eval() _UpperCAmelCase = convert_dalle_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , save_checkpoint=_SCREAMING_SNAKE_CASE ) if os.path.exists(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' ) else: _UpperCAmelCase = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='''cpu''' ) _UpperCAmelCase = upgrade_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) hf_model.load_state_dict(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = hf_model.state_dict() _UpperCAmelCase = count_parameters(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = count_parameters(_SCREAMING_SNAKE_CASE ) + count_parameters(_SCREAMING_SNAKE_CASE ) assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A : Dict = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") __A : Optional[Any] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
260
0
"""simple docstring""" # This is the module that test_patching.py uses to test patch_submodule() import os # noqa: this is just for tests import os as renamed_os # noqa: this is just for tests from os import path # noqa: this is just for tests from os import path as renamed_path # noqa: this is just for tests from os.path import join # noqa: this is just for tests from os.path import join as renamed_join # noqa: this is just for tests _a : List[Any] = open # noqa: we just need to have a builtin inside this module to test it properly
44
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowercase ( _SCREAMING_SNAKE_CASE : Features ): '''simple docstring''' _UpperCAmelCase = np.inf def set_batch_size(_SCREAMING_SNAKE_CASE : FeatureType ) -> None: nonlocal batch_size if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and feature.dtype == "binary": _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return None if batch_size is np.inf else batch_size class _a ( lowerCAmelCase): """simple docstring""" def __init__( self : Optional[Any] , __UpperCamelCase : NestedDataStructureLike[PathLike] , __UpperCamelCase : Optional[NamedSplit] = None , __UpperCamelCase : Optional[Features] = None , __UpperCamelCase : str = None , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[int] = None , **__UpperCamelCase : int , )->Union[str, Any]: super().__init__( __UpperCamelCase , split=__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase , streaming=__UpperCamelCase , num_proc=__UpperCamelCase , **__UpperCamelCase , ) _UpperCAmelCase = path_or_paths if isinstance(__UpperCamelCase , __UpperCamelCase ) else {self.split: path_or_paths} _UpperCAmelCase = _PACKAGED_DATASETS_MODULES['''parquet'''][1] _UpperCAmelCase = Parquet( cache_dir=__UpperCamelCase , data_files=__UpperCamelCase , features=__UpperCamelCase , hash=__UpperCamelCase , **__UpperCamelCase , ) def lowercase__ ( self : Union[str, Any] )->Dict: # Build iterable dataset if self.streaming: _UpperCAmelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None self.builder.download_and_prepare( download_config=__UpperCamelCase , download_mode=__UpperCamelCase , verification_mode=__UpperCamelCase , base_path=__UpperCamelCase , num_proc=self.num_proc , ) _UpperCAmelCase = self.builder.as_dataset( split=self.split , verification_mode=__UpperCamelCase , in_memory=self.keep_in_memory ) return dataset class _a : """simple docstring""" def __init__( self : Optional[int] , __UpperCamelCase : Dataset , __UpperCamelCase : Union[PathLike, BinaryIO] , __UpperCamelCase : Optional[int] = None , **__UpperCamelCase : Tuple , )->Optional[int]: _UpperCAmelCase = dataset _UpperCAmelCase = path_or_buf _UpperCAmelCase = batch_size or get_writer_batch_size(dataset.features ) _UpperCAmelCase = parquet_writer_kwargs def lowercase__ ( self : Optional[int] )->int: _UpperCAmelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , '''wb+''' ) as buffer: _UpperCAmelCase = self._write(file_obj=__UpperCamelCase , batch_size=__UpperCamelCase , **self.parquet_writer_kwargs ) else: _UpperCAmelCase = self._write(file_obj=self.path_or_buf , batch_size=__UpperCamelCase , **self.parquet_writer_kwargs ) return written def lowercase__ ( self : int , __UpperCamelCase : BinaryIO , __UpperCamelCase : int , **__UpperCamelCase : int )->int: _UpperCAmelCase = 0 _UpperCAmelCase = parquet_writer_kwargs.pop('''path_or_buf''' , __UpperCamelCase ) _UpperCAmelCase = self.dataset.features.arrow_schema _UpperCAmelCase = pq.ParquetWriter(__UpperCamelCase , schema=__UpperCamelCase , **__UpperCamelCase ) for offset in logging.tqdm( range(0 , len(self.dataset ) , __UpperCamelCase ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ): _UpperCAmelCase = query_table( table=self.dataset._data , key=slice(__UpperCamelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(__UpperCamelCase ) written += batch.nbytes writer.close() return written
260
0
"""simple docstring""" from heapq import heappop, heappush import numpy as np def lowercase ( lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : tuple[int, int] , lowerCAmelCase__ : tuple[int, int] , lowerCAmelCase__ : bool , ) -> tuple[float | int, list[tuple[int, int]]]: __a , __a = grid.shape __a = [-1, 1, 0, 0] __a = [0, 0, -1, 1] if allow_diagonal: dx += [-1, -1, 1, 1] dy += [-1, 1, -1, 1] __a , __a = [(0, source)], set() __a = np.full((rows, cols) , np.inf ) __a = 0 __a = np.empty((rows, cols) , dtype=lowerCAmelCase__ ) __a = None while queue: ((__a) , (__a)) = heappop(lowerCAmelCase__ ) if (x, y) in visited: continue visited.add((x, y) ) if (x, y) == destination: __a = [] while (x, y) != source: path.append((x, y) ) __a , __a = predecessors[x, y] path.append(lowerCAmelCase__ ) # add the source manually path.reverse() return matrix[destination], path for i in range(len(lowerCAmelCase__ ) ): __a , __a = x + dx[i], y + dy[i] if 0 <= nx < rows and 0 <= ny < cols: __a = grid[nx][ny] if next_node == 1 and matrix[nx, ny] > dist + 1: heappush(lowerCAmelCase__ , (dist + 1, (nx, ny)) ) __a = dist + 1 __a = (x, y) return np.inf, [] if __name__ == "__main__": import doctest doctest.testmod()
45
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str = " " ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = 0 for index, char in enumerate(_SCREAMING_SNAKE_CASE ): if char == separator: split_words.append(string[last_index:index] ) _UpperCAmelCase = index + 1 elif index + 1 == len(_SCREAMING_SNAKE_CASE ): split_words.append(string[last_index : index + 1] ) return split_words if __name__ == "__main__": from doctest import testmod testmod()
260
0
"""simple docstring""" from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = OrderedDict( [ # Base model mapping ("albert", "FlaxAlbertModel"), ("bart", "FlaxBartModel"), ("beit", "FlaxBeitModel"), ("bert", "FlaxBertModel"), ("big_bird", "FlaxBigBirdModel"), ("blenderbot", "FlaxBlenderbotModel"), ("blenderbot-small", "FlaxBlenderbotSmallModel"), ("clip", "FlaxCLIPModel"), ("distilbert", "FlaxDistilBertModel"), ("electra", "FlaxElectraModel"), ("gpt-sw3", "FlaxGPT2Model"), ("gpt2", "FlaxGPT2Model"), ("gpt_neo", "FlaxGPTNeoModel"), ("gptj", "FlaxGPTJModel"), ("longt5", "FlaxLongT5Model"), ("marian", "FlaxMarianModel"), ("mbart", "FlaxMBartModel"), ("mt5", "FlaxMT5Model"), ("opt", "FlaxOPTModel"), ("pegasus", "FlaxPegasusModel"), ("regnet", "FlaxRegNetModel"), ("resnet", "FlaxResNetModel"), ("roberta", "FlaxRobertaModel"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"), ("roformer", "FlaxRoFormerModel"), ("t5", "FlaxT5Model"), ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"), ("vit", "FlaxViTModel"), ("wav2vec2", "FlaxWav2Vec2Model"), ("whisper", "FlaxWhisperModel"), ("xglm", "FlaxXGLMModel"), ("xlm-roberta", "FlaxXLMRobertaModel"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ # Model for pre-training mapping ("albert", "FlaxAlbertForPreTraining"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForPreTraining"), ("big_bird", "FlaxBigBirdForPreTraining"), ("electra", "FlaxElectraForPreTraining"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("t5", "FlaxT5ForConditionalGeneration"), ("wav2vec2", "FlaxWav2Vec2ForPreTraining"), ("whisper", "FlaxWhisperForConditionalGeneration"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ # Model for Masked LM mapping ("albert", "FlaxAlbertForMaskedLM"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForMaskedLM"), ("big_bird", "FlaxBigBirdForMaskedLM"), ("distilbert", "FlaxDistilBertForMaskedLM"), ("electra", "FlaxElectraForMaskedLM"), ("mbart", "FlaxMBartForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("bart", "FlaxBartForConditionalGeneration"), ("blenderbot", "FlaxBlenderbotForConditionalGeneration"), ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"), ("encoder-decoder", "FlaxEncoderDecoderModel"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("marian", "FlaxMarianMTModel"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("pegasus", "FlaxPegasusForConditionalGeneration"), ("t5", "FlaxT5ForConditionalGeneration"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ # Model for Image-classsification ("beit", "FlaxBeitForImageClassification"), ("regnet", "FlaxRegNetForImageClassification"), ("resnet", "FlaxResNetForImageClassification"), ("vit", "FlaxViTForImageClassification"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ # Model for Causal LM mapping ("bart", "FlaxBartForCausalLM"), ("bert", "FlaxBertForCausalLM"), ("big_bird", "FlaxBigBirdForCausalLM"), ("electra", "FlaxElectraForCausalLM"), ("gpt-sw3", "FlaxGPT2LMHeadModel"), ("gpt2", "FlaxGPT2LMHeadModel"), ("gpt_neo", "FlaxGPTNeoForCausalLM"), ("gptj", "FlaxGPTJForCausalLM"), ("opt", "FlaxOPTForCausalLM"), ("roberta", "FlaxRobertaForCausalLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"), ("xglm", "FlaxXGLMForCausalLM"), ("xlm-roberta", "FlaxXLMRobertaForCausalLM"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ # Model for Sequence Classification mapping ("albert", "FlaxAlbertForSequenceClassification"), ("bart", "FlaxBartForSequenceClassification"), ("bert", "FlaxBertForSequenceClassification"), ("big_bird", "FlaxBigBirdForSequenceClassification"), ("distilbert", "FlaxDistilBertForSequenceClassification"), ("electra", "FlaxElectraForSequenceClassification"), ("mbart", "FlaxMBartForSequenceClassification"), ("roberta", "FlaxRobertaForSequenceClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"), ("roformer", "FlaxRoFormerForSequenceClassification"), ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ # Model for Question Answering mapping ("albert", "FlaxAlbertForQuestionAnswering"), ("bart", "FlaxBartForQuestionAnswering"), ("bert", "FlaxBertForQuestionAnswering"), ("big_bird", "FlaxBigBirdForQuestionAnswering"), ("distilbert", "FlaxDistilBertForQuestionAnswering"), ("electra", "FlaxElectraForQuestionAnswering"), ("mbart", "FlaxMBartForQuestionAnswering"), ("roberta", "FlaxRobertaForQuestionAnswering"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"), ("roformer", "FlaxRoFormerForQuestionAnswering"), ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ # Model for Token Classification mapping ("albert", "FlaxAlbertForTokenClassification"), ("bert", "FlaxBertForTokenClassification"), ("big_bird", "FlaxBigBirdForTokenClassification"), ("distilbert", "FlaxDistilBertForTokenClassification"), ("electra", "FlaxElectraForTokenClassification"), ("roberta", "FlaxRobertaForTokenClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"), ("roformer", "FlaxRoFormerForTokenClassification"), ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ # Model for Multiple Choice mapping ("albert", "FlaxAlbertForMultipleChoice"), ("bert", "FlaxBertForMultipleChoice"), ("big_bird", "FlaxBigBirdForMultipleChoice"), ("distilbert", "FlaxDistilBertForMultipleChoice"), ("electra", "FlaxElectraForMultipleChoice"), ("roberta", "FlaxRobertaForMultipleChoice"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"), ("roformer", "FlaxRoFormerForMultipleChoice"), ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ ("bert", "FlaxBertForNextSentencePrediction"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"), ("whisper", "FlaxWhisperForConditionalGeneration"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ ("whisper", "FlaxWhisperForAudioClassification"), ] ) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModel) class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_PRETRAINING_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining") class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling") class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_MASKED_LM_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling") class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base" ) class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="sequence classification" ) class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering") class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="token classification" ) class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice") class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction" ) class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update( FlaxAutoModelForImageClassification, head_doc="image classification" ) class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling") class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling" )
46
"""simple docstring""" import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def lowercase ( _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' _UpperCAmelCase = args.pruning_method _UpperCAmelCase = args.threshold _UpperCAmelCase = args.model_name_or_path.rstrip('''/''' ) _UpperCAmelCase = args.target_model_path print(f'Load fine-pruned model from {model_name_or_path}' ) _UpperCAmelCase = torch.load(os.path.join(_SCREAMING_SNAKE_CASE , '''pytorch_model.bin''' ) ) _UpperCAmelCase = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) elif "classifier" in name or "qa_output" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) elif "bias" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) else: if pruning_method == "magnitude": _UpperCAmelCase = MagnitudeBinarizer.apply(inputs=_SCREAMING_SNAKE_CASE , threshold=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "topK": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase = TopKBinarizer.apply(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase = ThresholdBinarizer.apply(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "l0": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase , _UpperCAmelCase = -0.1, 1.1 _UpperCAmelCase = torch.sigmoid(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = s * (r - l) + l _UpperCAmelCase = s_bar.clamp(min=0.0 , max=1.0 ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) else: raise ValueError('''Unknown pruning method''' ) if target_model_path is None: _UpperCAmelCase = os.path.join( os.path.dirname(_SCREAMING_SNAKE_CASE ) , f'bertarized_{os.path.basename(_SCREAMING_SNAKE_CASE )}' ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): shutil.copytree(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print(f'\nCreated folder {target_model_path}' ) torch.save(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , '''pytorch_model.bin''' ) ) print('''\nPruned model saved! See you later!''' ) if __name__ == "__main__": __A : Tuple = argparse.ArgumentParser() parser.add_argument( "--pruning_method", choices=["l0", "magnitude", "topK", "sigmoied_threshold"], type=str, required=True, help=( "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning," " sigmoied_threshold = Soft movement pruning)" ), ) parser.add_argument( "--threshold", type=float, required=False, help=( "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model." "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared." "Not needed for `l0`" ), ) parser.add_argument( "--model_name_or_path", type=str, required=True, help="Folder containing the model that was previously fine-pruned", ) parser.add_argument( "--target_model_path", default=None, type=str, required=False, help="Folder containing the model that was previously fine-pruned", ) __A : Optional[int] = parser.parse_args() main(args)
260
0
'''simple docstring''' import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class A__ : def __init__( self : int , _a : Optional[int] , _a : Optional[Any]=13 , _a : List[Any]=7 , _a : List[Any]=True , _a : str=True , _a : Dict=False , _a : str=True , _a : List[str]=99 , _a : str=64 , _a : Union[str, Any]=5 , _a : List[Any]=4 , _a : Optional[Any]=64 , _a : Tuple="gelu" , _a : List[str]=0.1 , _a : str=0.1 , _a : int=512 , _a : Dict=16 , _a : List[str]=2 , _a : int=0.02 , _a : Union[str, Any]=3 , _a : str=4 , _a : Tuple=None , ) -> Optional[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =parent _SCREAMING_SNAKE_CASE =batch_size _SCREAMING_SNAKE_CASE =seq_length _SCREAMING_SNAKE_CASE =is_training _SCREAMING_SNAKE_CASE =use_input_mask _SCREAMING_SNAKE_CASE =use_token_type_ids _SCREAMING_SNAKE_CASE =use_labels _SCREAMING_SNAKE_CASE =vocab_size _SCREAMING_SNAKE_CASE =hidden_size _SCREAMING_SNAKE_CASE =num_hidden_layers _SCREAMING_SNAKE_CASE =num_attention_heads _SCREAMING_SNAKE_CASE =intermediate_size _SCREAMING_SNAKE_CASE =hidden_act _SCREAMING_SNAKE_CASE =hidden_dropout_prob _SCREAMING_SNAKE_CASE =attention_probs_dropout_prob _SCREAMING_SNAKE_CASE =max_position_embeddings _SCREAMING_SNAKE_CASE =type_vocab_size _SCREAMING_SNAKE_CASE =type_sequence_label_size _SCREAMING_SNAKE_CASE =initializer_range _SCREAMING_SNAKE_CASE =num_labels _SCREAMING_SNAKE_CASE =num_choices _SCREAMING_SNAKE_CASE =scope def A ( self : str ) -> Tuple: '''simple docstring''' return MPNetConfig.from_pretrained('microsoft/mpnet-base' ) def A ( self : int ) -> int: '''simple docstring''' _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _SCREAMING_SNAKE_CASE =None if self.use_input_mask: _SCREAMING_SNAKE_CASE =random_attention_mask([self.batch_size, self.seq_length] ) _SCREAMING_SNAKE_CASE =None _SCREAMING_SNAKE_CASE =None _SCREAMING_SNAKE_CASE =None if self.use_labels: _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.type_sequence_label_size ) _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_choices ) _SCREAMING_SNAKE_CASE =self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : Dict ) -> Dict: '''simple docstring''' return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def A ( self : Tuple , _a : Dict , _a : Dict , _a : Dict , _a : int , _a : Optional[int] , _a : Any ) -> Tuple: '''simple docstring''' _SCREAMING_SNAKE_CASE =MPNetModel(config=_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =model(_a , _a ) _SCREAMING_SNAKE_CASE =model(_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def A ( self : Union[str, Any] , _a : List[Any] , _a : Dict , _a : List[Any] , _a : Any , _a : List[Any] , _a : Dict ) -> Optional[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =MPNetForQuestionAnswering(config=_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =model( _a , attention_mask=_a , start_positions=_a , end_positions=_a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : Tuple , _a : Any , _a : List[Any] , _a : List[Any] , _a : Dict , _a : List[str] , _a : List[Any] ) -> List[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.num_labels _SCREAMING_SNAKE_CASE =MPNetForSequenceClassification(_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Any , _a : int , _a : Optional[Any] , _a : List[Any] , _a : Tuple , _a : Tuple , _a : int ) -> Any: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.num_choices _SCREAMING_SNAKE_CASE =MPNetForMultipleChoice(config=_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _SCREAMING_SNAKE_CASE =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _SCREAMING_SNAKE_CASE =model( _a , attention_mask=_a , labels=_a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self : Any , _a : int , _a : Optional[Any] , _a : Optional[Any] , _a : str , _a : List[Any] , _a : Any ) -> Any: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.num_labels _SCREAMING_SNAKE_CASE =MPNetForTokenClassification(config=_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self : Any ) -> List[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs() ((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) =config_and_inputs _SCREAMING_SNAKE_CASE ={'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A__ ( A__ , A__ , unittest.TestCase ): A__ = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) A__ = ( { 'feature-extraction': MPNetModel, 'fill-mask': MPNetForMaskedLM, 'question-answering': MPNetForQuestionAnswering, 'text-classification': MPNetForSequenceClassification, 'token-classification': MPNetForTokenClassification, 'zero-shot': MPNetForSequenceClassification, } if is_torch_available() else {} ) A__ = False A__ = True def A ( self : int ) -> Optional[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =MPNetModelTester(self ) _SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , hidden_size=37 ) def A ( self : List[Any] ) -> int: '''simple docstring''' self.config_tester.run_common_tests() def A ( self : Any ) -> Any: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*_a ) def A ( self : List[Any] ) -> List[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*_a ) def A ( self : Optional[int] ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*_a ) def A ( self : Any ) -> Tuple: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*_a ) def A ( self : str ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*_a ) @require_torch class A__ ( unittest.TestCase ): @slow def A ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =MPNetModel.from_pretrained('microsoft/mpnet-base' ) _SCREAMING_SNAKE_CASE =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _SCREAMING_SNAKE_CASE =model(_a )[0] _SCREAMING_SNAKE_CASE =torch.Size((1, 11, 768) ) self.assertEqual(output.shape , _a ) _SCREAMING_SNAKE_CASE =torch.tensor( [[[-0.05_50, 0.19_43, -0.07_40], [-0.05_62, 0.22_11, -0.05_79], [-0.04_37, 0.33_37, -0.06_41]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
47
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) while cur > 1: # Find the maximum number in arr _UpperCAmelCase = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi _UpperCAmelCase = arr[mi::-1] + arr[mi + 1 : len(_SCREAMING_SNAKE_CASE )] # Reverse whole list _UpperCAmelCase = arr[cur - 1 :: -1] + arr[cur : len(_SCREAMING_SNAKE_CASE )] cur -= 1 return arr if __name__ == "__main__": __A : List[str] = input("Enter numbers separated by a comma:\n").strip() __A : List[Any] = [int(item) for item in user_input.split(",")] print(pancake_sort(unsorted))
260
0
import json import os import tempfile from unittest.mock import patch import torch from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import require_bnb, require_multi_gpu, slow from accelerate.test_utils.testing import AccelerateTestCase, require_cuda from accelerate.utils import patch_environment def A ( ) -> Optional[Any]: lowerCamelCase : str = torch.nn.Linear(2 ,4 ) lowerCamelCase : Optional[int] = torch.optim.AdamW(model.parameters() ,lr=1.0 ) lowerCamelCase : Optional[int] = torch.optim.lr_scheduler.OneCycleLR(_SCREAMING_SNAKE_CASE ,max_lr=0.01 ,steps_per_epoch=2 ,epochs=1 ) lowerCamelCase : Optional[int] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) ) lowerCamelCase : Union[str, Any] = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) ) return model, optimizer, scheduler, train_dl, valid_dl def A ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: return (model.weight.abs().sum() + model.bias.abs().sum()).item() def A ( _SCREAMING_SNAKE_CASE ) -> List[Any]: lowerCamelCase : Optional[Any] = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict() model.load_state_dict(_SCREAMING_SNAKE_CASE ) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' @require_cuda def _lowercase ( self ) -> Any: lowerCamelCase : Optional[Any] = Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type == "cuda" with self.assertRaises(UpperCamelCase__ ): lowerCamelCase : List[str] = Accelerator(cpu=UpperCamelCase__ ) def _lowercase ( self ) -> int: lowerCamelCase : Optional[Any] = Accelerator() lowerCamelCase : Optional[Any] = GradientState() assert state.num_steps == 1 lowerCamelCase : Any = 4 assert state.num_steps == 4 assert state.sync_gradients is True lowerCamelCase : Dict = False assert state.sync_gradients is False GradientState._reset_state() def _lowercase ( self ) -> Dict: lowerCamelCase : Tuple = Accelerator() lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = create_components() ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) : List[Any] = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) self.assertTrue(prepared_model in accelerator._models ) self.assertTrue(prepared_optimizer in accelerator._optimizers ) self.assertTrue(prepared_scheduler in accelerator._schedulers ) self.assertTrue(prepared_train_dl in accelerator._dataloaders ) self.assertTrue(prepared_valid_dl in accelerator._dataloaders ) def _lowercase ( self ) -> Union[str, Any]: lowerCamelCase : str = Accelerator() lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : List[str] = create_components() accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) accelerator.free_memory() self.assertTrue(len(accelerator._models ) == 0 ) self.assertTrue(len(accelerator._optimizers ) == 0 ) self.assertTrue(len(accelerator._schedulers ) == 0 ) self.assertTrue(len(accelerator._dataloaders ) == 0 ) def _lowercase ( self ) -> int: PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*UpperCamelCase__ , **UpperCamelCase__ ): pass with patch("torch.cuda.set_device" , UpperCamelCase__ ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ): lowerCamelCase : List[Any] = Accelerator() self.assertEqual(str(accelerator.state.device ) , "cuda:64" ) def _lowercase ( self ) -> Dict: lowerCamelCase : Union[str, Any] = Accelerator() lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = create_components() accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : List[str] = get_signature(UpperCamelCase__ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(UpperCamelCase__ ) # make sure random weights don't match load_random_weights(UpperCamelCase__ ) self.assertTrue(abs(model_signature - get_signature(UpperCamelCase__ ) ) > 1e-3 ) # make sure loaded weights match accelerator.load_state(UpperCamelCase__ ) self.assertTrue(abs(model_signature - get_signature(UpperCamelCase__ ) ) < 1e-3 ) def _lowercase ( self ) -> List[str]: lowerCamelCase : List[str] = Accelerator() lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : List[str] = create_components() accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : Dict = get_signature(UpperCamelCase__ ) # saving hook def save_config(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): lowerCamelCase : Union[str, Any] = {"class_name": models[0].__class__.__name__} with open(os.path.join(UpperCamelCase__ , "data.json" ) , "w" ) as f: json.dump(UpperCamelCase__ , UpperCamelCase__ ) # loading hook def load_config(UpperCamelCase__ , UpperCamelCase__ ): with open(os.path.join(UpperCamelCase__ , "data.json" ) , "r" ) as f: lowerCamelCase : str = json.load(UpperCamelCase__ ) lowerCamelCase : int = config["class_name"] lowerCamelCase : Any = accelerator.register_save_state_pre_hook(UpperCamelCase__ ) lowerCamelCase : Dict = accelerator.register_load_state_pre_hook(UpperCamelCase__ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(UpperCamelCase__ ) # make sure random weights don't match with hooks load_random_weights(UpperCamelCase__ ) self.assertTrue(abs(model_signature - get_signature(UpperCamelCase__ ) ) > 1e-3 ) # random class name to verify correct one is loaded lowerCamelCase : Optional[int] = "random" # make sure loaded weights match with hooks accelerator.load_state(UpperCamelCase__ ) self.assertTrue(abs(model_signature - get_signature(UpperCamelCase__ ) ) < 1e-3 ) # mode.class_name is loaded from config self.assertTrue(model.class_name == model.__class__.__name__ ) # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(UpperCamelCase__ ) # make sure random weights don't match with hooks removed load_random_weights(UpperCamelCase__ ) self.assertTrue(abs(model_signature - get_signature(UpperCamelCase__ ) ) > 1e-3 ) # random class name to verify correct one is loaded lowerCamelCase : List[Any] = "random" # make sure loaded weights match with hooks removed accelerator.load_state(UpperCamelCase__ ) self.assertTrue(abs(model_signature - get_signature(UpperCamelCase__ ) ) < 1e-3 ) # mode.class_name is NOT loaded from config self.assertTrue(model.class_name != model.__class__.__name__ ) def _lowercase ( self ) -> int: lowerCamelCase : Optional[Any] = Accelerator() lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Dict = create_components() lowerCamelCase : Optional[int] = None # This should work lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) self.assertTrue(dummy_obj is None ) def _lowercase ( self ) -> List[Any]: lowerCamelCase : List[str] = Accelerator() lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Dict = create_components() lowerCamelCase : Optional[int] = [1, 2, 3] # This should work lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual( getattr(UpperCamelCase__ , "_is_accelerate_prepared" , UpperCamelCase__ ) , UpperCamelCase__ , "Dummy object should have `_is_accelerate_prepared` set to `True`" , ) self.assertEqual( getattr(UpperCamelCase__ , "_is_accelerate_prepared" , UpperCamelCase__ ) , UpperCamelCase__ , "Model is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(UpperCamelCase__ , "_is_accelerate_prepared" , UpperCamelCase__ ) , UpperCamelCase__ , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(UpperCamelCase__ , "_is_accelerate_prepared" , UpperCamelCase__ ) , UpperCamelCase__ , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(UpperCamelCase__ , "_is_accelerate_prepared" , UpperCamelCase__ ) , UpperCamelCase__ , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(UpperCamelCase__ , "_is_accelerate_prepared" , UpperCamelCase__ ) , UpperCamelCase__ , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , ) @slow @require_bnb def _lowercase ( self ) -> int: from transformers import AutoModelForCausalLM lowerCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , load_in_abit=UpperCamelCase__ , device_map={"": 0} , ) lowerCamelCase : Any = Accelerator() # This should work lowerCamelCase : Tuple = accelerator.prepare(UpperCamelCase__ ) @slow @require_bnb def _lowercase ( self ) -> str: from transformers import AutoModelForCausalLM lowerCamelCase : Tuple = Accelerator() with init_empty_weights(): lowerCamelCase : Tuple = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , ) model.tie_weights() lowerCamelCase : List[Any] = infer_auto_device_map(UpperCamelCase__ ) lowerCamelCase : Optional[int] = "cpu" lowerCamelCase : Tuple = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , device_map=UpperCamelCase__ , load_in_abit=UpperCamelCase__ , llm_inta_enable_fpaa_cpu_offload=UpperCamelCase__ ) # This should not work and get value error with self.assertRaises(UpperCamelCase__ ): lowerCamelCase : List[str] = accelerator.prepare(UpperCamelCase__ ) @slow @require_bnb @require_multi_gpu def _lowercase ( self ) -> int: from transformers import AutoModelForCausalLM lowerCamelCase : Union[str, Any] = {"distributed_type": DistributedType.MULTI_GPU} with init_empty_weights(): lowerCamelCase : str = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , ) model.tie_weights() lowerCamelCase : Union[str, Any] = infer_auto_device_map(UpperCamelCase__ ) lowerCamelCase : Dict = 1 lowerCamelCase : List[Any] = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , load_in_abit=UpperCamelCase__ , device_map=UpperCamelCase__ , ) lowerCamelCase : Optional[Any] = Accelerator() # This should not work and get value error with self.assertRaises(UpperCamelCase__ ): lowerCamelCase : Union[str, Any] = accelerator.prepare(UpperCamelCase__ ) PartialState._reset_state() @slow @require_bnb @require_multi_gpu def _lowercase ( self ) -> str: from transformers import AutoModelForCausalLM with init_empty_weights(): lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , ) lowerCamelCase : int = infer_auto_device_map(UpperCamelCase__ ) lowerCamelCase : Any = 1 lowerCamelCase : Tuple = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , load_in_abit=UpperCamelCase__ , device_map=UpperCamelCase__ , ) lowerCamelCase : str = Accelerator() # This should work lowerCamelCase : Union[str, Any] = accelerator.prepare(UpperCamelCase__ ) @require_cuda def _lowercase ( self ) -> Dict: lowerCamelCase : str = torch.nn.Linear(10 , 10 ) lowerCamelCase : int = torch.optim.SGD(model.parameters() , lr=0.01 ) lowerCamelCase : int = Accelerator(cpu=UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = accelerator.prepare(UpperCamelCase__ )
48
"""simple docstring""" from pathlib import Path import numpy as np from PIL import Image def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2989 * r + 0.5870 * g + 0.1140 * b def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' return (gray > 127) & (gray <= 255) def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' _UpperCAmelCase = np.zeros_like(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image _UpperCAmelCase = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): _UpperCAmelCase = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() _UpperCAmelCase = int(summation > 0 ) return output if __name__ == "__main__": # read original image __A : str = Path(__file__).resolve().parent / "image_data" / "lena.jpg" __A : str = np.array(Image.open(lena_path)) # kernel to be applied __A : List[Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) __A : Optional[Any] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image __A : Optional[Any] = Image.fromarray(output).convert("RGB") pil_img.save("result_dilation.png")
260
0
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class _A : UpperCamelCase__ : Dict = BlenderbotSmallConfig UpperCamelCase__ : Tuple = {} UpperCamelCase__ : str = '''gelu''' def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Optional[int]=7 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Dict=99 , __SCREAMING_SNAKE_CASE : Optional[int]=32 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : str=4 , __SCREAMING_SNAKE_CASE : int=37 , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=20 , __SCREAMING_SNAKE_CASE : List[str]=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=1 , __SCREAMING_SNAKE_CASE : List[Any]=0 , ): '''simple docstring''' __a = parent __a = batch_size __a = seq_length __a = is_training __a = use_labels __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = eos_token_id __a = pad_token_id __a = bos_token_id def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) __a = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1) __a = tf.concat([input_ids, eos_tensor] , axis=1) __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __a = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __a = prepare_blenderbot_small_inputs_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) return config, inputs_dict def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any): '''simple docstring''' __a = TFBlenderbotSmallModel(config=__SCREAMING_SNAKE_CASE).get_decoder() __a = inputs_dict['''input_ids'''] __a = input_ids[:1, :] __a = inputs_dict['''attention_mask'''][:1, :] __a = inputs_dict['''head_mask'''] __a = 1 # first forward pass __a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE) __a , __a = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __a = ids_tensor((self.batch_size, 3) , config.vocab_size) __a = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta) # append to next input_ids and __a = tf.concat([input_ids, next_tokens] , axis=-1) __a = tf.concat([attention_mask, next_attn_mask] , axis=-1) __a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE)[0] __a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE)[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1]) # select random slice __a = int(ids_tensor((1,) , output_from_past.shape[-1])) __a = output_from_no_past[:, -3:, random_slice_idx] __a = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , rtol=1E-3) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ): if attention_mask is None: __a = tf.cast(tf.math.not_equal(_UpperCAmelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __a = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __a = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __a = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __a = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _A ( __UpperCAmelCase ,__UpperCAmelCase ,unittest.TestCase ): UpperCamelCase__ : Union[str, Any] = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) UpperCamelCase__ : Union[str, Any] = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () UpperCamelCase__ : str = ( { '''conversational''': TFBlenderbotSmallForConditionalGeneration, '''feature-extraction''': TFBlenderbotSmallModel, '''summarization''': TFBlenderbotSmallForConditionalGeneration, '''text2text-generation''': TFBlenderbotSmallForConditionalGeneration, '''translation''': TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) UpperCamelCase__ : Optional[int] = True UpperCamelCase__ : int = False UpperCamelCase__ : str = False def _lowerCamelCase ( self : int): '''simple docstring''' __a = TFBlenderbotSmallModelTester(self) __a = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str): '''simple docstring''' self.config_tester.run_common_tests() def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__SCREAMING_SNAKE_CASE) @require_tokenizers @require_tf class _A ( unittest.TestCase ): UpperCamelCase__ : Union[str, Any] = [ '''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like ''' ''' i\'m going to throw up.\nand why is that?''' ] UpperCamelCase__ : str = '''facebook/blenderbot_small-90M''' @cached_property def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''') @cached_property def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name) return model @slow def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = self.tokenizer(self.src_text , return_tensors='''tf''') __a = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__SCREAMING_SNAKE_CASE , ) __a = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__SCREAMING_SNAKE_CASE)[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
49
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A : Tuple = logging.get_logger(__name__) __A : Optional[Any] = { "MIT/ast-finetuned-audioset-10-10-0.4593": ( "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json" ), } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """audio-spectrogram-transformer""" def __init__( self : int , __UpperCamelCase : Optional[Any]=7_6_8 , __UpperCamelCase : int=1_2 , __UpperCamelCase : List[Any]=1_2 , __UpperCamelCase : List[Any]=3_0_7_2 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : Union[str, Any]=0.0 , __UpperCamelCase : Dict=0.0 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Union[str, Any]=1e-12 , __UpperCamelCase : Optional[Any]=1_6 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : int=1_0 , __UpperCamelCase : Optional[int]=1_0 , __UpperCamelCase : str=1_0_2_4 , __UpperCamelCase : Optional[Any]=1_2_8 , **__UpperCamelCase : Any , )->Tuple: super().__init__(**__UpperCamelCase ) _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = patch_size _UpperCAmelCase = qkv_bias _UpperCAmelCase = frequency_stride _UpperCAmelCase = time_stride _UpperCAmelCase = max_length _UpperCAmelCase = num_mel_bins
260
0
import warnings from functools import wraps from typing import Callable def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Callable: @wraps(_UpperCAmelCase ) def _inner_fn(*_UpperCAmelCase , **_UpperCAmelCase ): warnings.warn( (F"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , _UpperCAmelCase , ) return fn(*_UpperCAmelCase , **_UpperCAmelCase ) return _inner_fn
50
"""simple docstring""" def lowercase ( ): '''simple docstring''' _UpperCAmelCase = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] _UpperCAmelCase = 6 _UpperCAmelCase = 1 _UpperCAmelCase = 1901 _UpperCAmelCase = 0 while year < 2001: day += 7 if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0): if day > days_per_month[month - 1] and month != 2: month += 1 _UpperCAmelCase = day - days_per_month[month - 2] elif day > 29 and month == 2: month += 1 _UpperCAmelCase = day - 29 else: if day > days_per_month[month - 1]: month += 1 _UpperCAmelCase = day - days_per_month[month - 2] if month > 12: year += 1 _UpperCAmelCase = 1 if year < 2001 and day == 1: sundays += 1 return sundays if __name__ == "__main__": print(solution())
260
0
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available snake_case_ : Any = { "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"], "processing_mgp_str": ["MgpstrProcessor"], "tokenization_mgp_str": ["MgpstrTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : int = [ "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST", "MgpstrModel", "MgpstrPreTrainedModel", "MgpstrForSceneTextRecognition", ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys snake_case_ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
51
"""simple docstring""" from __future__ import annotations import math def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = str(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [n] for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ): list_nums.append(int(str_num[i:] ) ) list_nums.append(int(str_num[:-i] ) ) return list_nums def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if len(str(_SCREAMING_SNAKE_CASE ) ) > 3: if not is_prime(int(str(_SCREAMING_SNAKE_CASE )[-3:] ) ) or not is_prime(int(str(_SCREAMING_SNAKE_CASE )[:3] ) ): return False return True def lowercase ( _SCREAMING_SNAKE_CASE : int = 11 ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = 13 while len(_SCREAMING_SNAKE_CASE ) != count: if validate(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = list_truncated_nums(_SCREAMING_SNAKE_CASE ) if all(is_prime(_SCREAMING_SNAKE_CASE ) for i in list_nums ): list_truncated_primes.append(_SCREAMING_SNAKE_CASE ) num += 2 return list_truncated_primes def lowercase ( ): '''simple docstring''' return sum(compute_truncated_primes(11 ) ) if __name__ == "__main__": print(f'''{sum(compute_truncated_primes(11)) = }''')
260
0
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase : Optional[int] = logging.get_logger(__name__) __lowerCamelCase : str = { """google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""", """google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json""" # See all FNet models at https://huggingface.co/models?filter=fnet } class A__ ( __snake_case ): _UpperCAmelCase :str = 'fnet' def __init__( self , A_=3_2000 , A_=768 , A_=12 , A_=3072 , A_="gelu_new" , A_=0.1 , A_=512 , A_=4 , A_=0.02 , A_=1e-12 , A_=False , A_=512 , A_=3 , A_=1 , A_=2 , **A_ , ): '''simple docstring''' super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ ) UpperCamelCase : Any = vocab_size UpperCamelCase : Tuple = max_position_embeddings UpperCamelCase : Any = hidden_size UpperCamelCase : List[str] = num_hidden_layers UpperCamelCase : Any = intermediate_size UpperCamelCase : Optional[Any] = hidden_act UpperCamelCase : int = hidden_dropout_prob UpperCamelCase : Dict = initializer_range UpperCamelCase : Union[str, Any] = type_vocab_size UpperCamelCase : str = layer_norm_eps UpperCamelCase : List[str] = use_tpu_fourier_optimizations UpperCamelCase : Optional[int] = tpu_short_seq_length
52
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __A : str = sys.version_info >= (3, 10) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Tuple=None ): '''simple docstring''' return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = field(default="""toto""" , metadata={"""help""": """help message"""}) @dataclass class _a : """simple docstring""" UpperCamelCase__ = False UpperCamelCase__ = True UpperCamelCase__ = None class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """titi""" UpperCamelCase__ = """toto""" class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """titi""" UpperCamelCase__ = """toto""" UpperCamelCase__ = 42 @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" def lowercase__ ( self : Tuple )->Optional[int]: _UpperCAmelCase = BasicEnum(self.foo ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" def lowercase__ ( self : List[str] )->List[Any]: _UpperCAmelCase = MixedTypeEnum(self.foo ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = None UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """help message"""}) UpperCamelCase__ = None UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[]) @dataclass class _a : """simple docstring""" UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[1, 2, 3]) UpperCamelCase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) UpperCamelCase__ = list_field(default=[0.1, 0.2, 0.3]) @dataclass class _a : """simple docstring""" UpperCamelCase__ = field() UpperCamelCase__ = field() UpperCamelCase__ = field() def lowercase__ ( self : int )->str: _UpperCAmelCase = BasicEnum(self.required_enum ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = field() UpperCamelCase__ = None UpperCamelCase__ = field(default="""toto""" , metadata={"""help""": """help message"""}) UpperCamelCase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) if is_python_no_less_than_3_10: @dataclass class _a : """simple docstring""" UpperCamelCase__ = False UpperCamelCase__ = True UpperCamelCase__ = None @dataclass class _a : """simple docstring""" UpperCamelCase__ = None UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """help message"""}) UpperCamelCase__ = None UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[]) class _a ( unittest.TestCase): """simple docstring""" def lowercase__ ( self : int , __UpperCamelCase : argparse.ArgumentParser , __UpperCamelCase : argparse.ArgumentParser )->Dict: self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): _UpperCAmelCase = {k: v for k, v in vars(__UpperCamelCase ).items() if k != '''container'''} _UpperCAmelCase = {k: v for k, v in vars(__UpperCamelCase ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , __UpperCamelCase ) and yy.get('''choices''' , __UpperCamelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](__UpperCamelCase ) , yy['''type'''](__UpperCamelCase ) ) del xx["type"], yy["type"] self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : int )->str: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--bar''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--baz''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--flag''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((_UpperCAmelCase) , ) = parser.parse_args_into_dataclasses(__UpperCamelCase , look_for_args_file=__UpperCamelCase ) self.assertFalse(example.flag ) def lowercase__ ( self : Dict )->List[Any]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=4_2 , type=__UpperCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCamelCase , help='''help message''' ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Tuple )->List[str]: _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) expected.add_argument('''--baz''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=__UpperCamelCase , dest='''baz''' ) expected.add_argument('''--opt''' , type=__UpperCamelCase , default=__UpperCamelCase ) _UpperCAmelCase = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(__UpperCamelCase ) for dataclass_type in dataclass_types: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) def lowercase__ ( self : Optional[Any] )->str: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 4_2] , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) _UpperCAmelCase = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) _UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 4_2 ) _UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowercase__ ( self : List[str] )->List[str]: @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 4_2) , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 4_2 ) def lowercase__ ( self : int )->int: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=__UpperCamelCase ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCamelCase ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual( __UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) _UpperCAmelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(__UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def lowercase__ ( self : Union[str, Any] )->Tuple: _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=__UpperCamelCase , type=__UpperCamelCase ) expected.add_argument('''--bar''' , default=__UpperCamelCase , type=__UpperCamelCase , help='''help message''' ) expected.add_argument('''--baz''' , default=__UpperCamelCase , type=__UpperCamelCase ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) _UpperCAmelCase = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(__UpperCamelCase ) for dataclass_type in dataclass_types: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , bar=__UpperCamelCase , baz=__UpperCamelCase , ces=[] , des=[] ) ) _UpperCAmelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(__UpperCamelCase , Namespace(foo=1_2 , bar=3.1_4 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def lowercase__ ( self : Any )->int: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--required_str''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCamelCase , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : str )->List[Any]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCamelCase , ) expected.add_argument('''--opt''' , type=__UpperCamelCase , default=__UpperCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCamelCase , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Optional[Any] )->Optional[int]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } _UpperCAmelCase = parser.parse_dict(__UpperCamelCase )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->List[str]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, '''extra''': 4_2, } self.assertRaises(__UpperCamelCase , parser.parse_dict , __UpperCamelCase , allow_extra_keys=__UpperCamelCase ) def lowercase__ ( self : Optional[Any] )->Optional[int]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(__UpperCamelCase , '''temp_json''' ) os.mkdir(__UpperCamelCase ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->Any: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(__UpperCamelCase , '''temp_yaml''' ) os.mkdir(__UpperCamelCase ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : int )->List[str]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase )
260
0
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL a__ : Dict =logging.get_logger(__name__) class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] =["pixel_values"] def __init__( self : int , __A : bool = True , __A : Dict[str, int] = None , __A : float = None , __A : PILImageResampling = PILImageResampling.BILINEAR , __A : bool = True , __A : Union[int, float] = 1 / 2_5_5 , __A : bool = True , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , **__A : Tuple , ): super().__init__(**__A ) __UpperCamelCase = size if size is not None else {'shortest_edge': 3_8_4} __UpperCamelCase = get_size_dict(__A , default_to_square=__A ) __UpperCamelCase = do_resize __UpperCamelCase = size # Default value set here for backwards compatibility where the value in config is None __UpperCamelCase = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6 __UpperCamelCase = resample __UpperCamelCase = do_rescale __UpperCamelCase = rescale_factor __UpperCamelCase = do_normalize __UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCamelCase ( self : int , __A : np.ndarray , __A : Dict[str, int] , __A : float , __A : PILImageResampling = PILImageResampling.BICUBIC , __A : Optional[Union[str, ChannelDimension]] = None , **__A : str , ): __UpperCamelCase = get_size_dict(__A , default_to_square=__A ) if "shortest_edge" not in size: raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' ) __UpperCamelCase = size['shortest_edge'] if shortest_edge < 3_8_4: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct __UpperCamelCase = int(shortest_edge / crop_pct ) __UpperCamelCase = get_resize_output_image_size(__A , size=__A , default_to_square=__A ) __UpperCamelCase = resize(image=__A , size=__A , resample=__A , data_format=__A , **__A ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=__A , size=(shortest_edge, shortest_edge) , data_format=__A , **__A ) else: # warping (no cropping) when evaluated at 384 or larger return resize( __A , size=(shortest_edge, shortest_edge) , resample=__A , data_format=__A , **__A ) def _lowerCamelCase ( self : int , __A : np.ndarray , __A : Union[int, float] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Optional[Any] , ): return rescale(__A , scale=__A , data_format=__A , **__A ) def _lowerCamelCase ( self : Tuple , __A : np.ndarray , __A : Union[float, List[float]] , __A : Union[float, List[float]] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Optional[int] , ): return normalize(__A , mean=__A , std=__A , data_format=__A , **__A ) def _lowerCamelCase ( self : str , __A : ImageInput , __A : bool = None , __A : Dict[str, int] = None , __A : float = None , __A : PILImageResampling = None , __A : bool = None , __A : float = None , __A : bool = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[str, TensorType]] = None , __A : ChannelDimension = ChannelDimension.FIRST , **__A : List[str] , ): __UpperCamelCase = do_resize if do_resize is not None else self.do_resize __UpperCamelCase = crop_pct if crop_pct is not None else self.crop_pct __UpperCamelCase = resample if resample is not None else self.resample __UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale __UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize __UpperCamelCase = image_mean if image_mean is not None else self.image_mean __UpperCamelCase = image_std if image_std is not None else self.image_std __UpperCamelCase = size if size is not None else self.size __UpperCamelCase = get_size_dict(__A , default_to_square=__A ) __UpperCamelCase = make_list_of_images(__A ) if not valid_images(__A ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None: raise ValueError('crop_pct must be specified if size < 384.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. __UpperCamelCase = [to_numpy_array(__A ) for image in images] if do_resize: __UpperCamelCase = [self.resize(image=__A , size=__A , crop_pct=__A , resample=__A ) for image in images] if do_rescale: __UpperCamelCase = [self.rescale(image=__A , scale=__A ) for image in images] if do_normalize: __UpperCamelCase = [self.normalize(image=__A , mean=__A , std=__A ) for image in images] __UpperCamelCase = [to_channel_dimension_format(__A , __A ) for image in images] __UpperCamelCase = {'pixel_values': images} return BatchFeature(data=__A , tensor_type=__A )
53
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] _UpperCAmelCase = True for i in range(_SCREAMING_SNAKE_CASE ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: _UpperCAmelCase = True if a[i].islower(): _UpperCAmelCase = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
260
0
"""simple docstring""" def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if upper_limit < 0: raise ValueError("Limit for the Catalan sequence must be ≥ 0" ) __SCREAMING_SNAKE_CASE = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 __SCREAMING_SNAKE_CASE = 1 if upper_limit > 0: __SCREAMING_SNAKE_CASE = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(lowerCAmelCase_ ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''') print('''\n*** Enter -1 at any time to quit ***''') print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''') try: while True: a__ : List[str] = int(input().strip()) if N < 0: print('''\n********* Goodbye!! ************''') break else: print(F"The Catalan numbers from 0 through {N} are:") print(catalan_numbers(N)) print('''Try another upper limit for the sequence: ''', end='''''') except (NameError, ValueError): print('''\n********* Invalid input, goodbye! ************\n''') import doctest doctest.testmod()
54
"""simple docstring""" import random def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' _UpperCAmelCase = a[left_index] _UpperCAmelCase = left_index + 1 for j in range(left_index + 1 , _SCREAMING_SNAKE_CASE ): if a[j] < pivot: _UpperCAmelCase , _UpperCAmelCase = a[i], a[j] i += 1 _UpperCAmelCase , _UpperCAmelCase = a[i - 1], a[left_index] return i - 1 def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' if left < right: _UpperCAmelCase = random.randint(_SCREAMING_SNAKE_CASE , right - 1 ) _UpperCAmelCase , _UpperCAmelCase = ( a[left], a[pivot], ) # switches the pivot with the left most bound _UpperCAmelCase = partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) quick_sort_random( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # recursive quicksort to the left of the pivot point quick_sort_random( _SCREAMING_SNAKE_CASE , pivot_index + 1 , _SCREAMING_SNAKE_CASE ) # recursive quicksort to the right of the pivot point def lowercase ( ): '''simple docstring''' _UpperCAmelCase = input('''Enter numbers separated by a comma:\n''' ).strip() _UpperCAmelCase = [int(_SCREAMING_SNAKE_CASE ) for item in user_input.split(''',''' )] quick_sort_random(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) ) print(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
260
0
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class snake_case : """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase=2 , UpperCamelCase=8 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=99 , UpperCamelCase=16 , UpperCamelCase=5 , UpperCamelCase=2 , UpperCamelCase=36 , UpperCamelCase="gelu" , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=512 , UpperCamelCase=16 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=3 , UpperCamelCase=4 , UpperCamelCase=None , ): """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = seq_length lowerCamelCase_ = is_training lowerCamelCase_ = use_input_mask lowerCamelCase_ = use_token_type_ids lowerCamelCase_ = use_labels lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = type_vocab_size lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = num_labels lowerCamelCase_ = num_choices lowerCamelCase_ = scope def snake_case ( self ): """simple docstring""" lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase_ = None if self.use_input_mask: lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ = None if self.use_token_type_ids: lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase_ = None lowerCamelCase_ = None lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case ( self ): """simple docstring""" return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.get_config() lowerCamelCase_ = 300 return config def snake_case ( self ): """simple docstring""" ( ( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) , ) = self.prepare_config_and_inputs() lowerCamelCase_ = True lowerCamelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = MraModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , token_type_ids=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ): """simple docstring""" lowerCamelCase_ = True lowerCamelCase_ = MraModel(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowerCamelCase_ = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , ) lowerCamelCase_ = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , encoder_hidden_states=UpperCamelCase , ) lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = MraForMaskedLM(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = MraForQuestionAnswering(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowerCamelCase_ = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.num_labels lowerCamelCase_ = MraForSequenceClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.num_labels lowerCamelCase_ = MraForTokenClassification(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.num_choices lowerCamelCase_ = MraForMultipleChoice(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowerCamelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase_ = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) , ) = config_and_inputs lowerCamelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class snake_case ( lowercase , unittest.TestCase ): """simple docstring""" _lowerCamelCase = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = () def snake_case ( self ): """simple docstring""" lowerCamelCase_ = MraModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 ) def snake_case ( self ): """simple docstring""" self.config_tester.run_common_tests() def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase_ = type self.model_tester.create_and_check_model(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = MraModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @unittest.skip(reason="MRA does not output attentions" ) def snake_case ( self ): """simple docstring""" return @require_torch class snake_case ( unittest.TestCase ): """simple docstring""" @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = MraModel.from_pretrained("uw-madison/mra-base-512-4" ) lowerCamelCase_ = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): lowerCamelCase_ = model(UpperCamelCase )[0] lowerCamelCase_ = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , UpperCamelCase ) lowerCamelCase_ = torch.tensor( [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase , atol=1e-4 ) ) @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" ) lowerCamelCase_ = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): lowerCamelCase_ = model(UpperCamelCase )[0] lowerCamelCase_ = 5_0265 lowerCamelCase_ = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , UpperCamelCase ) lowerCamelCase_ = torch.tensor( [[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase , atol=1e-4 ) ) @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" ) lowerCamelCase_ = torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): lowerCamelCase_ = model(UpperCamelCase )[0] lowerCamelCase_ = 5_0265 lowerCamelCase_ = torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape , UpperCamelCase ) lowerCamelCase_ = torch.tensor( [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase , atol=1e-4 ) )
55
"""simple docstring""" import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging __A : Union[str, Any] = "\\n\n" __A : Any = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n" __A : List[str] = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _a ( datasets.Metric): """simple docstring""" def lowercase__ ( self : List[Any] )->Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''input_texts''': datasets.Value('''string''' ), } ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , ) def lowercase__ ( self : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : int = 1_6 , __UpperCamelCase : bool = True , __UpperCamelCase : List[Any]=None )->Any: if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." if device == "gpu": _UpperCAmelCase = '''cuda''' else: _UpperCAmelCase = '''cuda''' if torch.cuda.is_available() else '''cpu''' _UpperCAmelCase = AutoModelForCausalLM.from_pretrained(__UpperCamelCase ) _UpperCAmelCase = model.to(__UpperCamelCase ) _UpperCAmelCase = AutoTokenizer.from_pretrained(__UpperCamelCase ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: _UpperCAmelCase = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(__UpperCamelCase ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" _UpperCAmelCase = model.config.max_length - 1 else: _UpperCAmelCase = model.config.max_length _UpperCAmelCase = tokenizer( __UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors='''pt''' , return_attention_mask=__UpperCamelCase , ).to(__UpperCamelCase ) _UpperCAmelCase = encodings['''input_ids'''] _UpperCAmelCase = encodings['''attention_mask'''] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." _UpperCAmelCase = [] _UpperCAmelCase = CrossEntropyLoss(reduction='''none''' ) for start_index in logging.tqdm(range(0 , len(__UpperCamelCase ) , __UpperCamelCase ) ): _UpperCAmelCase = min(start_index + batch_size , len(__UpperCamelCase ) ) _UpperCAmelCase = encoded_texts[start_index:end_index] _UpperCAmelCase = attn_masks[start_index:end_index] if add_start_token: _UpperCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__UpperCamelCase ) _UpperCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) _UpperCAmelCase = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(__UpperCamelCase ), attn_mask] , dim=1 ) _UpperCAmelCase = encoded_batch with torch.no_grad(): _UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase ).logits _UpperCAmelCase = out_logits[..., :-1, :].contiguous() _UpperCAmelCase = labels[..., 1:].contiguous() _UpperCAmelCase = attn_mask[..., 1:].contiguous() _UpperCAmelCase = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , __UpperCamelCase ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(__UpperCamelCase )}
260
0
'''simple docstring''' def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' if n == 0: return 1 elif n % 2 == 1: return (binary_exponentiation(__UpperCAmelCase, n - 1, __UpperCAmelCase ) * a) % mod else: snake_case_ = binary_exponentiation(__UpperCAmelCase, n / 2, __UpperCAmelCase ) return (b * b) % mod # a prime number a : Union[str, Any] = 701 a : Optional[int] = 10_0000_0000 a : List[Any] = 10 # using binary exponentiation function, O(log(p)): print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p) print((a / b) % p == (a * b ** (p - 2)) % p)
56
"""simple docstring""" import pytest import datasets # Import fixture modules as plugins __A : int = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"] def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' for item in items: if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ): continue item.add_marker(pytest.mark.unit ) def lowercase ( _SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' _UpperCAmelCase = tmp_path_factory.getbasetemp() / '''cache''' _UpperCAmelCase = test_hf_cache_home / '''datasets''' _UpperCAmelCase = test_hf_cache_home / '''metrics''' _UpperCAmelCase = test_hf_cache_home / '''modules''' monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = test_hf_datasets_cache / '''downloads''' monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = test_hf_datasets_cache / '''downloads''' / '''extracted''' monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_SCREAMING_SNAKE_CASE ) ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE , scope='''session''' ) def lowercase ( ): '''simple docstring''' datasets.disable_progress_bar() @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , _SCREAMING_SNAKE_CASE ) @pytest.fixture def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , _SCREAMING_SNAKE_CASE )
260
0
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _UpperCamelCase ( lowerCAmelCase__ ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : List[Any] ="""hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline""" def snake_case ( self , __a=0 ): __lowerCAmelCase = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(__a ) ) __lowerCAmelCase = np.random.RandomState(__a ) __lowerCAmelCase = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "strength": 0.7_5, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def snake_case ( self ): __lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=__a ) __lowerCAmelCase = self.get_dummy_inputs() __lowerCAmelCase = pipe(**__a ).images __lowerCAmelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 1_28, 1_28, 3) __lowerCAmelCase = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def snake_case ( self ): __lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) __lowerCAmelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__a ) pipe.set_progress_bar_config(disable=__a ) __lowerCAmelCase = self.get_dummy_inputs() __lowerCAmelCase = pipe(**__a ).images __lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) __lowerCAmelCase = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def snake_case ( self ): __lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) __lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__a ) # warmup pass to apply optimizations __lowerCAmelCase = pipe(**self.get_dummy_inputs() ) __lowerCAmelCase = self.get_dummy_inputs() __lowerCAmelCase = pipe(**__a ).images __lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) __lowerCAmelCase = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def snake_case ( self ): __lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) __lowerCAmelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__a ) __lowerCAmelCase = self.get_dummy_inputs() __lowerCAmelCase = pipe(**__a ).images __lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) __lowerCAmelCase = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def snake_case ( self ): __lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) __lowerCAmelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__a ) __lowerCAmelCase = self.get_dummy_inputs() __lowerCAmelCase = pipe(**__a ).images __lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) __lowerCAmelCase = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def snake_case ( self ): __lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) __lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__a ) __lowerCAmelCase = self.get_dummy_inputs() __lowerCAmelCase = pipe(**__a ).images __lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) __lowerCAmelCase = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @property def snake_case ( self ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def snake_case ( self ): __lowerCAmelCase = ort.SessionOptions() __lowerCAmelCase = False return options def snake_case ( self ): __lowerCAmelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) __lowerCAmelCase = init_image.resize((7_68, 5_12) ) # using the PNDM scheduler by default __lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__a ) __lowerCAmelCase = "A fantasy landscape, trending on artstation" __lowerCAmelCase = np.random.RandomState(0 ) __lowerCAmelCase = pipe( prompt=__a , image=__a , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=__a , output_type="np" , ) __lowerCAmelCase = output.images __lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) __lowerCAmelCase = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def snake_case ( self ): __lowerCAmelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) __lowerCAmelCase = init_image.resize((7_68, 5_12) ) __lowerCAmelCase = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" ) __lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=__a , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__a ) __lowerCAmelCase = "A fantasy landscape, trending on artstation" __lowerCAmelCase = np.random.RandomState(0 ) __lowerCAmelCase = pipe( prompt=__a , image=__a , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=__a , output_type="np" , ) __lowerCAmelCase = output.images __lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) __lowerCAmelCase = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
57
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : list ): '''simple docstring''' if len(_SCREAMING_SNAKE_CASE ) <= 1: return lst _UpperCAmelCase = 1 while i < len(_SCREAMING_SNAKE_CASE ): if lst[i - 1] <= lst[i]: i += 1 else: _UpperCAmelCase , _UpperCAmelCase = lst[i], lst[i - 1] i -= 1 if i == 0: _UpperCAmelCase = 1 return lst if __name__ == "__main__": __A : Dict = input("Enter numbers separated by a comma:\n").strip() __A : List[Any] = [int(item) for item in user_input.split(",")] print(gnome_sort(unsorted))
260
0
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """bigscience/bloom""": """https://huggingface.co/bigscience/bloom/resolve/main/config.json""", """bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/config.json""", """bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json""", """bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json""", """bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/config.json""", """bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json""", } class a_ ( snake_case_ ): '''simple docstring''' UpperCamelCase = '''bloom''' UpperCamelCase = ['''past_key_values'''] UpperCamelCase = { '''num_hidden_layers''': '''n_layer''', '''num_attention_heads''': '''n_head''', } def __init__( self , A=25_0880 , A=64 , A=2 , A=8 , A=1e-5 , A=0.02 , A=True , A=1 , A=2 , A=False , A=0.0 , A=0.0 , A=1 , A=False , **A , ) -> Tuple: _SCREAMING_SNAKE_CASE = vocab_size # Backward compatibility with n_embed kwarg _SCREAMING_SNAKE_CASE = kwargs.pop("""n_embed""" , A ) _SCREAMING_SNAKE_CASE = hidden_size if n_embed is None else n_embed _SCREAMING_SNAKE_CASE = n_layer _SCREAMING_SNAKE_CASE = n_head _SCREAMING_SNAKE_CASE = layer_norm_epsilon _SCREAMING_SNAKE_CASE = initializer_range _SCREAMING_SNAKE_CASE = use_cache _SCREAMING_SNAKE_CASE = pretraining_tp _SCREAMING_SNAKE_CASE = apply_residual_connection_post_layernorm _SCREAMING_SNAKE_CASE = hidden_dropout _SCREAMING_SNAKE_CASE = attention_dropout _SCREAMING_SNAKE_CASE = bos_token_id _SCREAMING_SNAKE_CASE = eos_token_id _SCREAMING_SNAKE_CASE = slow_but_exact super().__init__(bos_token_id=A , eos_token_id=A , **A ) class a_ ( snake_case_ ): '''simple docstring''' UpperCamelCase = version.parse('''1.12''' ) def __init__( self , A , A = "default" , A = None , A = False , ) -> str: super().__init__(A , task=A , patching_specs=A , use_past=A ) if not getattr(self._config , """pad_token_id""" , A ): # TODO: how to do that better? _SCREAMING_SNAKE_CASE = 0 @property def snake_case_( self ) -> Mapping[str, Mapping[int, str]]: _SCREAMING_SNAKE_CASE = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(A , direction="""inputs""" , inverted_values_shape=A ) _SCREAMING_SNAKE_CASE = {0: """batch""", 1: """past_sequence + sequence"""} else: _SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""} return common_inputs @property def snake_case_( self ) -> int: return self._config.n_layer @property def snake_case_( self ) -> int: return self._config.n_head @property def snake_case_( self ) -> float: return 1e-3 def snake_case_( self , A , A = -1 , A = -1 , A = False , A = None , ) -> Mapping[str, Any]: _SCREAMING_SNAKE_CASE = super(A , self ).generate_dummy_inputs( A , batch_size=A , seq_length=A , is_pair=A , framework=A ) # We need to order the input in the way they appears in the forward() _SCREAMING_SNAKE_CASE = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values _SCREAMING_SNAKE_CASE = seqlen + 2 _SCREAMING_SNAKE_CASE = self._config.hidden_size // self.num_attention_heads _SCREAMING_SNAKE_CASE = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) _SCREAMING_SNAKE_CASE = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) _SCREAMING_SNAKE_CASE = [ (torch.zeros(A ), torch.zeros(A )) for _ in range(self.num_layers ) ] _SCREAMING_SNAKE_CASE = common_inputs["""attention_mask"""] if self.use_past: _SCREAMING_SNAKE_CASE = ordered_inputs["""attention_mask"""].dtype _SCREAMING_SNAKE_CASE = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(A , A , dtype=A )] , dim=1 ) return ordered_inputs @property def snake_case_( self ) -> int: return 13
58
"""simple docstring""" import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version __A : int = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt") def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : int = 1_6000 ): '''simple docstring''' _UpperCAmelCase = int(round(sample_rate * max_length ) ) if len(_SCREAMING_SNAKE_CASE ) <= sample_length: return wav _UpperCAmelCase = randint(0 , len(_SCREAMING_SNAKE_CASE ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class _a : """simple docstring""" UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """Name of a dataset from the datasets package"""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """A file containing the training audio paths and labels."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """A file containing the validation audio paths and labels."""}) UpperCamelCase__ = field( default="""train""" , metadata={ """help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'""" } , ) UpperCamelCase__ = field( default="""validation""" , metadata={ """help""": ( """The name of the training data set split to use (via the datasets library). Defaults to 'validation'""" ) } , ) UpperCamelCase__ = field( default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , ) UpperCamelCase__ = field( default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. Defaults to 'label'"""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) UpperCamelCase__ = field( default=20 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = field( default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""}) UpperCamelCase__ = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Name or path of preprocessor config."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , ) def lowercase__ ( self : Optional[Any] )->int: if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( '''The argument `--freeze_feature_extractor` is deprecated and ''' '''will be removed in a future version. Use `--freeze_feature_encoder`''' '''instead. Setting `freeze_feature_encoder==True`.''' , __UpperCamelCase , ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( '''The argument `--freeze_feature_extractor` is deprecated and ''' '''should not be used in combination with `--freeze_feature_encoder`.''' '''Only make use of `--freeze_feature_encoder`.''' ) def lowercase ( ): '''simple docstring''' _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_audio_classification''' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(_SCREAMING_SNAKE_CASE ) transformers.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} ' + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. _UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. ' '''Use --overwrite_output_dir to train from scratch.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset and prepare it for the audio classification task. _UpperCAmelCase = DatasetDict() _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( f'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. ' '''Make sure to set `--audio_column_name` to the correct audio column - one of ''' f'{", ".join(raw_datasets["train"].column_names )}.' ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( f'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. ' '''Make sure to set `--label_column_name` to the correct text column - one of ''' f'{", ".join(raw_datasets["train"].column_names )}.' ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy _UpperCAmelCase = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. _UpperCAmelCase = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) _UpperCAmelCase = feature_extractor.model_input_names[0] def train_transforms(_SCREAMING_SNAKE_CASE : Tuple ): _UpperCAmelCase = [] for audio in batch[data_args.audio_column_name]: _UpperCAmelCase = random_subsample( audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=feature_extractor.sampling_rate ) _UpperCAmelCase = {model_input_name: inputs.get(_SCREAMING_SNAKE_CASE )} _UpperCAmelCase = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(_SCREAMING_SNAKE_CASE : Optional[int] ): _UpperCAmelCase = [audio['''array'''] for audio in batch[data_args.audio_column_name]] _UpperCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=feature_extractor.sampling_rate ) _UpperCAmelCase = {model_input_name: inputs.get(_SCREAMING_SNAKE_CASE )} _UpperCAmelCase = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. _UpperCAmelCase = raw_datasets['''train'''].features[data_args.label_column_name].names _UpperCAmelCase , _UpperCAmelCase = {}, {} for i, label in enumerate(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = str(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = label # Load the accuracy metric from the datasets package _UpperCAmelCase = evaluate.load('''accuracy''' ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. def compute_metrics(_SCREAMING_SNAKE_CASE : List[str] ): _UpperCAmelCase = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=_SCREAMING_SNAKE_CASE , references=eval_pred.label_ids ) _UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(_SCREAMING_SNAKE_CASE ) , labelaid=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _UpperCAmelCase = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: _UpperCAmelCase = ( raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(_SCREAMING_SNAKE_CASE , output_all_columns=_SCREAMING_SNAKE_CASE ) if training_args.do_eval: if data_args.max_eval_samples is not None: _UpperCAmelCase = ( raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(_SCREAMING_SNAKE_CASE , output_all_columns=_SCREAMING_SNAKE_CASE ) # Initialize our trainer _UpperCAmelCase = Trainer( model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , ) # Training if training_args.do_train: _UpperCAmelCase = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase = last_checkpoint _UpperCAmelCase = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _UpperCAmelCase = trainer.evaluate() trainer.log_metrics('''eval''' , _SCREAMING_SNAKE_CASE ) trainer.save_metrics('''eval''' , _SCREAMING_SNAKE_CASE ) # Write model card and (optionally) push to hub _UpperCAmelCase = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''audio-classification''', '''dataset''': data_args.dataset_name, '''tags''': ['''audio-classification'''], } if training_args.push_to_hub: trainer.push_to_hub(**_SCREAMING_SNAKE_CASE ) else: trainer.create_model_card(**_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
260
0
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class UpperCAmelCase : def __init__(self : Dict , snake_case__ : Dict , snake_case__ : Any=13 , snake_case__ : Any=32 , snake_case__ : Optional[Any]=2 , snake_case__ : Union[str, Any]=3 , snake_case__ : List[Any]=16 , snake_case__ : int=[1, 2, 1] , snake_case__ : Dict=[2, 2, 4] , snake_case__ : Dict=2 , snake_case__ : Tuple=2.0 , snake_case__ : Optional[int]=True , snake_case__ : Union[str, Any]=0.0 , snake_case__ : Any=0.0 , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int="gelu" , snake_case__ : Optional[int]=False , snake_case__ : List[Any]=True , snake_case__ : List[str]=0.02 , snake_case__ : int=1e-5 , snake_case__ : List[str]=True , snake_case__ : Union[str, Any]=None , snake_case__ : List[Any]=True , snake_case__ : Optional[Any]=10 , snake_case__ : Optional[Any]=8 , snake_case__ : Any=["stage1", "stage2", "stage3"] , snake_case__ : Tuple=[1, 2, 3] , ) -> Union[str, Any]: '''simple docstring''' snake_case : Any = parent snake_case : Optional[int] = batch_size snake_case : Union[str, Any] = image_size snake_case : Dict = patch_size snake_case : Optional[Any] = num_channels snake_case : Union[str, Any] = embed_dim snake_case : int = depths snake_case : List[str] = num_heads snake_case : Union[str, Any] = window_size snake_case : Union[str, Any] = mlp_ratio snake_case : List[Any] = qkv_bias snake_case : List[Any] = hidden_dropout_prob snake_case : Union[str, Any] = attention_probs_dropout_prob snake_case : Union[str, Any] = drop_path_rate snake_case : int = hidden_act snake_case : Optional[int] = use_absolute_embeddings snake_case : int = patch_norm snake_case : Union[str, Any] = layer_norm_eps snake_case : Any = initializer_range snake_case : Optional[Any] = is_training snake_case : Tuple = scope snake_case : Optional[int] = use_labels snake_case : Optional[Any] = type_sequence_label_size snake_case : Union[str, Any] = encoder_stride snake_case : Any = out_features snake_case : Tuple = out_indices def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict: '''simple docstring''' snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case : int = None if self.use_labels: snake_case : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case : Dict = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE (self : List[str] ) -> int: '''simple docstring''' return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : Tuple ) -> Optional[Any]: '''simple docstring''' snake_case : Union[str, Any] = MaskFormerSwinModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() snake_case : List[Any] = model(snake_case__ ) snake_case : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) snake_case : int = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Union[str, Any] ) -> str: '''simple docstring''' snake_case : Optional[int] = MaskFormerSwinBackbone(config=snake_case__ ) model.to(snake_case__ ) model.eval() snake_case : List[Any] = model(snake_case__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(snake_case__ ): snake_case : Tuple = ["stem"] snake_case : List[Any] = MaskFormerSwinBackbone(config=snake_case__ ) def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]: '''simple docstring''' snake_case : Union[str, Any] = self.prepare_config_and_inputs() snake_case , snake_case , snake_case : List[Any] = config_and_inputs snake_case : int = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase ( A_ ,A_ ,unittest.TestCase ): A__ : List[str] = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) A__ : str = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} A__ : Optional[Any] = False A__ : List[Any] = False A__ : List[str] = False A__ : List[str] = False A__ : Union[str, Any] = False def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> List[str]: '''simple docstring''' snake_case : str = MaskFormerSwinModelTester(self ) snake_case : Optional[int] = ConfigTester(self , config_class=snake_case__ , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with" " `nn.DataParallel`" ) ) def _SCREAMING_SNAKE_CASE (self : str ) -> Optional[Any]: '''simple docstring''' pass def _SCREAMING_SNAKE_CASE (self : str ) -> List[str]: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _SCREAMING_SNAKE_CASE (self : Tuple ) -> List[Any]: '''simple docstring''' return def _SCREAMING_SNAKE_CASE (self : Dict ) -> str: '''simple docstring''' snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def _SCREAMING_SNAKE_CASE (self : int ) -> Dict: '''simple docstring''' snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*snake_case__ ) @unittest.skip("Swin does not use inputs_embeds" ) def _SCREAMING_SNAKE_CASE (self : int ) -> Any: '''simple docstring''' pass @unittest.skip("Swin does not support feedforward chunking" ) def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Dict: '''simple docstring''' pass def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> List[str]: '''simple docstring''' snake_case , snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case : int = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) ) def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict: '''simple docstring''' snake_case , snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case : str = model_class(snake_case__ ) snake_case : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case : Optional[Any] = [*signature.parameters.keys()] snake_case : Tuple = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" ) def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> str: '''simple docstring''' pass @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" ) def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Any: '''simple docstring''' pass def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Tuple ) -> Optional[int]: '''simple docstring''' snake_case : Tuple = model_class(snake_case__ ) model.to(snake_case__ ) model.eval() with torch.no_grad(): snake_case : Any = model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) snake_case : int = outputs.hidden_states snake_case : Union[str, Any] = getattr( self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(snake_case__ ) , snake_case__ ) # Swin has a different seq_length snake_case : Any = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def _SCREAMING_SNAKE_CASE (self : Dict ) -> Union[str, Any]: '''simple docstring''' snake_case , snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case : int = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: snake_case : int = True self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case : Dict = True self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE (self : int ) -> Any: '''simple docstring''' snake_case , snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case : Any = 3 snake_case : List[str] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) snake_case : Tuple = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) snake_case : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: snake_case : str = True self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case : Optional[Any] = True self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , (padded_height, padded_width) ) @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" ) def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> str: '''simple docstring''' pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" ) def _SCREAMING_SNAKE_CASE (self : str ) -> int: '''simple docstring''' pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" ) def _SCREAMING_SNAKE_CASE (self : int ) -> str: '''simple docstring''' pass def _SCREAMING_SNAKE_CASE (self : Any ) -> Any: '''simple docstring''' snake_case , snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(snake_case__ : Union[str, Any] ): snake_case : Any = 0 return t def check_equivalence(snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Optional[int]={} ): with torch.no_grad(): snake_case : Optional[Any] = model(**snake_case__ , return_dict=snake_case__ , **snake_case__ ) snake_case : Tuple = model(**snake_case__ , return_dict=snake_case__ , **snake_case__ ).to_tuple() def recursive_check(snake_case__ : List[str] , snake_case__ : Optional[Any] ): if isinstance(snake_case__ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(snake_case__ , snake_case__ ): recursive_check(snake_case__ , snake_case__ ) elif isinstance(snake_case__ , snake_case__ ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(snake_case__ , snake_case__ ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(snake_case__ ) , set_nan_tensor_to_zero(snake_case__ ) , atol=1e-5 ) , msg=( "Tuple and dict output are not equal. Difference:" f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:""" f""" {torch.isnan(snake_case__ ).any()} and `inf`: {torch.isinf(snake_case__ )}. Dict has""" f""" `nan`: {torch.isnan(snake_case__ ).any()} and `inf`: {torch.isinf(snake_case__ )}.""" ) , ) recursive_check(snake_case__ , snake_case__ ) for model_class in self.all_model_classes: snake_case : Optional[int] = model_class(snake_case__ ) model.to(snake_case__ ) model.eval() snake_case : Union[str, Any] = self._prepare_for_class(snake_case__ , snake_case__ ) snake_case : Tuple = self._prepare_for_class(snake_case__ , snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ ) snake_case : Tuple = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) snake_case : Optional[Any] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ ) snake_case : Dict = self._prepare_for_class(snake_case__ , snake_case__ ) snake_case : List[Any] = self._prepare_for_class(snake_case__ , snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ , {"output_hidden_states": True} ) snake_case : Any = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) snake_case : List[str] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ , {"output_hidden_states": True} ) @require_torch class UpperCAmelCase ( unittest.TestCase ,A_ ): A__ : int = (MaskFormerSwinBackbone,) if is_torch_available() else () A__ : int = MaskFormerSwinConfig def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Any: '''simple docstring''' snake_case : Union[str, Any] = MaskFormerSwinModelTester(self ) def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Optional[Any]: '''simple docstring''' snake_case , snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() snake_case : Optional[int] = inputs_dict["pixel_values"].shape[0] for backbone_class in self.all_model_classes: snake_case : Optional[int] = backbone_class(snake_case__ ) backbone.to(snake_case__ ) backbone.eval() snake_case : Union[str, Any] = backbone(**snake_case__ ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , snake_case__ ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True snake_case : Optional[int] = backbone(**snake_case__ , output_hidden_states=snake_case__ ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) snake_case , snake_case , snake_case : Dict = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: snake_case : Optional[Any] = backbone(**snake_case__ , output_attentions=snake_case__ ) self.assertIsNotNone(outputs.attentions )
59
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = (DPMSolverSinglestepScheduler,) UpperCamelCase__ = (("""num_inference_steps""", 25),) def lowercase__ ( self : Tuple , **__UpperCamelCase : Tuple )->Any: _UpperCAmelCase = { '''num_train_timesteps''': 1_0_0_0, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''prediction_type''': '''epsilon''', '''thresholding''': False, '''sample_max_value''': 1.0, '''algorithm_type''': '''dpmsolver++''', '''solver_type''': '''midpoint''', '''lambda_min_clipped''': -float('''inf''' ), '''variance_type''': None, } config.update(**__UpperCamelCase ) return config def lowercase__ ( self : Dict , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : Optional[int] )->Tuple: _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase , _UpperCAmelCase = sample, sample for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ): _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase__ ( self : Any )->Union[str, Any]: pass def lowercase__ ( self : str , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : List[Any] )->Dict: _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config() _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residual (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase__ ( self : int , __UpperCamelCase : List[str]=None , **__UpperCamelCase : Optional[int] )->List[Any]: if scheduler is None: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 1_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample return sample def lowercase__ ( self : List[Any] )->Dict: _UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = 5_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_5_7_4 ) < 1e-3 def lowercase__ ( self : Dict )->Dict: for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__UpperCamelCase ) def lowercase__ ( self : str )->Optional[Any]: # make sure that iterating over schedulers with same config names gives same results # for defaults _UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 _UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 def lowercase__ ( self : Union[str, Any] )->int: self.check_over_configs(thresholding=__UpperCamelCase ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , algorithm_type='''dpmsolver++''' , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , ) def lowercase__ ( self : str )->str: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCamelCase ) def lowercase__ ( self : List[Any] )->Tuple: for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , ) _UpperCAmelCase = self.full_loop( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , ) assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers" def lowercase__ ( self : Dict )->List[str]: self.check_over_configs(lower_order_final=__UpperCamelCase ) self.check_over_configs(lower_order_final=__UpperCamelCase ) def lowercase__ ( self : Dict )->str: self.check_over_configs(lambda_min_clipped=-float('''inf''' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def lowercase__ ( self : List[str] )->int: self.check_over_configs(variance_type=__UpperCamelCase ) self.check_over_configs(variance_type='''learned_range''' ) def lowercase__ ( self : List[str] )->Union[str, Any]: for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 ) def lowercase__ ( self : List[Any] )->int: _UpperCAmelCase = self.full_loop() _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 def lowercase__ ( self : List[str] )->List[str]: _UpperCAmelCase = self.full_loop(use_karras_sigmas=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_2_4_8 ) < 1e-3 def lowercase__ ( self : int )->List[Any]: _UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.1_4_5_3 ) < 1e-3 def lowercase__ ( self : Optional[Any] )->Dict: _UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.0_6_4_9 ) < 1e-3 def lowercase__ ( self : Union[str, Any] )->List[str]: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 1_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample assert sample.dtype == torch.floataa
260
0
"""simple docstring""" from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class snake_case_: __UpperCamelCase = 42 __UpperCamelCase = None # Automatically constructed __UpperCamelCase = "dict" __UpperCamelCase = None __UpperCamelCase = field(default='''Translation''' , init=a__ , repr=a__ ) def __call__( self : Union[str, Any] ): return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def lowerCamelCase__ ( self : List[Any] ): from .features import Value return {k: Value('''string''' ) for k in sorted(self.languages )} @dataclass class snake_case_: __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None # Automatically constructed __UpperCamelCase = "dict" __UpperCamelCase = None __UpperCamelCase = field(default='''TranslationVariableLanguages''' , init=a__ , repr=a__ ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : List[Any] = sorted(set(self.languages ) ) if self.languages else None lowerCAmelCase : int = len(self.languages ) if self.languages else None def __call__( self : List[Any] ): return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : List[Any] ): lowerCAmelCase : List[Any] = set(self.languages ) if self.languages and set(UpperCamelCase_ ) - lang_set: raise ValueError( F'''Some languages in example ({", ".join(sorted(set(UpperCamelCase_ ) - lang_set ) )}) are not in valid set ({", ".join(UpperCamelCase_ )}).''' ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. lowerCAmelCase : List[str] = [] for lang, text in translation_dict.items(): if isinstance(UpperCamelCase_ , UpperCamelCase_ ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. lowerCAmelCase, lowerCAmelCase : Optional[Any] = zip(*sorted(UpperCamelCase_ ) ) return {"language": languages, "translation": translations} def lowerCamelCase__ ( self : Dict ): from .features import Sequence, Value return { "language": Sequence(Value('''string''' ) ), "translation": Sequence(Value('''string''' ) ), }
60
"""simple docstring""" from __future__ import annotations from math import pi from typing import Protocol import matplotlib.pyplot as plt import numpy as np class _a ( lowerCAmelCase): """simple docstring""" def lowercase__ ( self : List[Any] , __UpperCamelCase : float )->float: return 0.0 def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] ) _UpperCAmelCase = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] ) return lowest, highest def lowercase ( _SCREAMING_SNAKE_CASE : FilterType , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = 512 _UpperCAmelCase = [1] + [0] * (size - 1) _UpperCAmelCase = [filter_type.process(_SCREAMING_SNAKE_CASE ) for item in inputs] _UpperCAmelCase = [0] * (samplerate - size) # zero-padding outputs += filler _UpperCAmelCase = np.abs(np.fft.fft(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = 20 * np.logaa(_SCREAMING_SNAKE_CASE ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) # Display within reasonable bounds _UpperCAmelCase = get_bounds(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) ) plt.ylabel('''Gain (dB)''' ) plt.plot(_SCREAMING_SNAKE_CASE ) plt.show() def lowercase ( _SCREAMING_SNAKE_CASE : FilterType , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = 512 _UpperCAmelCase = [1] + [0] * (size - 1) _UpperCAmelCase = [filter_type.process(_SCREAMING_SNAKE_CASE ) for item in inputs] _UpperCAmelCase = [0] * (samplerate - size) # zero-padding outputs += filler _UpperCAmelCase = np.angle(np.fft.fft(_SCREAMING_SNAKE_CASE ) ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) plt.ylim(-2 * pi , 2 * pi ) plt.ylabel('''Phase shift (Radians)''' ) plt.plot(np.unwrap(_SCREAMING_SNAKE_CASE , -2 * pi ) ) plt.show()
260
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _a = logging.get_logger(__name__) _a = { 'facebook/deit-base-distilled-patch16-224': ( 'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json' ), # See all DeiT models at https://huggingface.co/models?filter=deit } class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = """deit""" def __init__( self , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu" , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1E-1_2 , lowercase_=224 , lowercase_=16 , lowercase_=3 , lowercase_=True , lowercase_=16 , **lowercase_ , ): """simple docstring""" super().__init__(**lowercase_ ) UpperCAmelCase_ : int = hidden_size UpperCAmelCase_ : Dict = num_hidden_layers UpperCAmelCase_ : int = num_attention_heads UpperCAmelCase_ : List[str] = intermediate_size UpperCAmelCase_ : int = hidden_act UpperCAmelCase_ : List[Any] = hidden_dropout_prob UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : Dict = layer_norm_eps UpperCAmelCase_ : int = image_size UpperCAmelCase_ : int = patch_size UpperCAmelCase_ : List[str] = num_channels UpperCAmelCase_ : Optional[int] = qkv_bias UpperCAmelCase_ : Optional[int] = encoder_stride class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple = version.parse("""1.11""" ) @property def UpperCamelCase__ ( self ): """simple docstring""" return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def UpperCamelCase__ ( self ): """simple docstring""" return 1E-4
61
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A : Union[str, Any] = logging.get_logger(__name__) __A : Dict = { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json", "umberto-commoncrawl-cased-v1": ( "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json" ), "umberto-wikipedia-uncased-v1": ( "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json" ), } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """camembert""" def __init__( self : List[str] , __UpperCamelCase : Union[str, Any]=3_0_5_2_2 , __UpperCamelCase : Optional[Any]=7_6_8 , __UpperCamelCase : Optional[int]=1_2 , __UpperCamelCase : Union[str, Any]=1_2 , __UpperCamelCase : List[Any]=3_0_7_2 , __UpperCamelCase : Dict="gelu" , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : int=0.1 , __UpperCamelCase : int=5_1_2 , __UpperCamelCase : Dict=2 , __UpperCamelCase : int=0.0_2 , __UpperCamelCase : int=1e-12 , __UpperCamelCase : Optional[Any]=1 , __UpperCamelCase : Dict=0 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : Any="absolute" , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : str=None , **__UpperCamelCase : Optional[Any] , )->str: super().__init__(pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = position_embedding_type _UpperCAmelCase = use_cache _UpperCAmelCase = classifier_dropout class _a ( lowerCAmelCase): """simple docstring""" @property def lowercase__ ( self : int )->Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _UpperCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
260
0
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict ): if index == r: for j in range(SCREAMING_SNAKE_CASE__ ): print(data[j] , end=' ' ) print(' ' ) return # When no more elements are there to put in data[] if i >= n: return # current is included, put next at next location __UpperCamelCase =arr[i] combination_util(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index + 1 , SCREAMING_SNAKE_CASE__ , i + 1 ) # current is excluded, replace it with # next (Note that i+1 is passed, but # index is not changed) combination_util(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , i + 1 ) # The main function that prints all combinations # of size r in arr[] of size n. This function # mainly uses combinationUtil() def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ): # A temporary array to store all combination one by one __UpperCamelCase =[0] * r # Print all combination using temporary array 'data[]' combination_util(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 0 , SCREAMING_SNAKE_CASE__ , 0 ) if __name__ == "__main__": # Driver code to check the function above _A = [10, 20, 30, 40, 50] print_combination(arr, len(arr), 3) # This code is contributed by Ambuj sahu
62
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A : Tuple = logging.get_logger(__name__) __A : List[str] = { "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """poolformer""" def __init__( self : List[str] , __UpperCamelCase : int=3 , __UpperCamelCase : List[Any]=1_6 , __UpperCamelCase : str=1_6 , __UpperCamelCase : List[Any]=3 , __UpperCamelCase : int=4.0 , __UpperCamelCase : str=[2, 2, 6, 2] , __UpperCamelCase : Tuple=[6_4, 1_2_8, 3_2_0, 5_1_2] , __UpperCamelCase : int=[7, 3, 3, 3] , __UpperCamelCase : str=[4, 2, 2, 2] , __UpperCamelCase : Union[str, Any]=[2, 1, 1, 1] , __UpperCamelCase : List[str]=4 , __UpperCamelCase : List[str]=0.0 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : List[str]=True , __UpperCamelCase : Union[str, Any]=1e-5 , __UpperCamelCase : str=0.0_2 , **__UpperCamelCase : List[Any] , )->Dict: _UpperCAmelCase = num_channels _UpperCAmelCase = patch_size _UpperCAmelCase = stride _UpperCAmelCase = padding _UpperCAmelCase = pool_size _UpperCAmelCase = hidden_sizes _UpperCAmelCase = mlp_ratio _UpperCAmelCase = depths _UpperCAmelCase = patch_sizes _UpperCAmelCase = strides _UpperCAmelCase = num_encoder_blocks _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_layer_scale _UpperCAmelCase = layer_scale_init_value _UpperCAmelCase = initializer_range super().__init__(**__UpperCamelCase ) class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = version.parse("""1.11""") @property def lowercase__ ( self : Union[str, Any] )->Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowercase__ ( self : Tuple )->float: return 2e-3
260
0
'''simple docstring''' def _lowerCamelCase ( lowercase : int ) -> str: if number > 0: raise ValueError("input must be a negative integer" ) _a = len(bin(lowercase )[3:] ) _a = bin(abs(lowercase ) - (1 << binary_number_length) )[3:] _a = ( ( "1" + "0" * (binary_number_length - len(lowercase )) + twos_complement_number ) if number < 0 else "0" ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
63
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __A : Union[str, Any] = 16 __A : Optional[Any] = 32 def lowercase ( _SCREAMING_SNAKE_CASE : Accelerator , _SCREAMING_SNAKE_CASE : int = 16 ): '''simple docstring''' _UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' ) _UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(_SCREAMING_SNAKE_CASE : Optional[int] ): # max_length=None => use the model max length (it's actually the default) _UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _UpperCAmelCase = datasets.map( _SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(_SCREAMING_SNAKE_CASE : List[str] ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCAmelCase = 16 elif accelerator.mixed_precision != "no": _UpperCAmelCase = 8 else: _UpperCAmelCase = None return tokenizer.pad( _SCREAMING_SNAKE_CASE , padding='''longest''' , max_length=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' , ) # Instantiate dataloaders. _UpperCAmelCase = DataLoader( tokenized_datasets['''train'''] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = DataLoader( tokenized_datasets['''validation'''] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders __A : Optional[int] = mocked_dataloaders # noqa: F811 def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , _SCREAMING_SNAKE_CASE ) == "1": _UpperCAmelCase = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: _UpperCAmelCase = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir ) else: _UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCAmelCase = config['''lr'''] _UpperCAmelCase = int(config['''num_epochs'''] ) _UpperCAmelCase = int(config['''seed'''] ) _UpperCAmelCase = int(config['''batch_size'''] ) set_seed(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase , _UpperCAmelCase = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation _UpperCAmelCase = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE _UpperCAmelCase = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=_SCREAMING_SNAKE_CASE ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCAmelCase = model.to(accelerator.device ) # Instantiate optimizer _UpperCAmelCase = AdamW(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE ) # Instantiate scheduler _UpperCAmelCase = get_linear_schedule_with_warmup( optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(_SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # New Code # # We need to initialize the trackers we use. Overall configurations can also be stored if args.with_tracking: _UpperCAmelCase = os.path.split(_SCREAMING_SNAKE_CASE )[-1].split('''.''' )[0] accelerator.init_trackers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Now we train the model for epoch in range(_SCREAMING_SNAKE_CASE ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: _UpperCAmelCase = 0 for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() _UpperCAmelCase = loss / gradient_accumulation_steps accelerator.backward(_SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.logits.argmax(dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'epoch {epoch}:' , _SCREAMING_SNAKE_CASE ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { '''accuracy''': eval_metric['''accuracy'''], '''f1''': eval_metric['''f1'''], '''train_loss''': total_loss.item() / len(_SCREAMING_SNAKE_CASE ), '''epoch''': epoch, } , step=_SCREAMING_SNAKE_CASE , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def lowercase ( ): '''simple docstring''' _UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) parser.add_argument( '''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , ) parser.add_argument( '''--project_dir''' , type=_SCREAMING_SNAKE_CASE , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
260
0
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset from utils import logger class lowercase( __a ): '''simple docstring''' def __init__( self: Any, a_: Any, a_: Tuple ): '''simple docstring''' _snake_case : Any = params _snake_case : int = np.array(a_ ) _snake_case : str = np.array([len(a_ ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self: List[str], a_: Dict ): '''simple docstring''' return (self.token_ids[index], self.lengths[index]) def __len__( self: Optional[Any] ): '''simple docstring''' return len(self.lengths ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Union[str, Any] = self.params.max_model_input_size _snake_case : str = self.lengths > max_len logger.info(f"Splitting {sum(a_ )} too long sequences." ) def divide_chunks(a_: Tuple, a_: int ): return [l[i : i + n] for i in range(0, len(a_ ), a_ )] _snake_case : List[Any] = [] _snake_case : Optional[int] = [] if self.params.mlm: _snake_case , _snake_case : Optional[int] = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""] else: _snake_case , _snake_case : Optional[int] = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""] for seq_, len_ in zip(self.token_ids, self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: _snake_case : Optional[Any] = [] for sub_s in divide_chunks(seq_, max_len - 2 ): if sub_s[0] != cls_id: _snake_case : Optional[int] = np.insert(a_, 0, a_ ) if sub_s[-1] != sep_id: _snake_case : List[Any] = np.insert(a_, len(a_ ), a_ ) assert len(a_ ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(a_ ) new_tok_ids.extend(a_ ) new_lengths.extend([len(a_ ) for l in sub_seqs] ) _snake_case : Union[str, Any] = np.array(a_ ) _snake_case : List[Any] = np.array(a_ ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[int] = len(self ) _snake_case : int = self.lengths > 11 _snake_case : str = self.token_ids[indices] _snake_case : int = self.lengths[indices] _snake_case : int = len(self ) logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences." ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' if "unk_token" not in self.params.special_tok_ids: return else: _snake_case : Dict = self.params.special_tok_ids["""unk_token"""] _snake_case : Optional[int] = len(self ) _snake_case : List[str] = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) _snake_case : Dict = (unk_occs / self.lengths) < 0.5 _snake_case : int = self.token_ids[indices] _snake_case : Optional[Any] = self.lengths[indices] _snake_case : Dict = len(self ) logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%)." ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' if not self.params.is_master: return logger.info(f"{len(self )} sequences" ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def UpperCamelCase_ ( self: Dict, a_: Optional[int] ): '''simple docstring''' _snake_case : Optional[Any] = [t[0] for t in batch] _snake_case : int = [t[1] for t in batch] assert len(a_ ) == len(a_ ) # Max for paddings _snake_case : int = max(a_ ) # Pad token ids if self.params.mlm: _snake_case : Any = self.params.special_tok_ids["""pad_token"""] else: _snake_case : Optional[Any] = self.params.special_tok_ids["""unk_token"""] _snake_case : Dict = [list(t.astype(a_ ) ) + [pad_idx] * (max_seq_len_ - len(a_ )) for t in token_ids] assert len(tk_ ) == len(a_ ) assert all(len(a_ ) == max_seq_len_ for t in tk_ ) _snake_case : List[str] = torch.tensor(tk_ ) # (bs, max_seq_len_) _snake_case : Optional[Any] = torch.tensor(a_ ) # (bs) return tk_t, lg_t
64
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : set ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ), len(grid[0] ) if ( min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) _UpperCAmelCase = 0 count += depth_first_search(_SCREAMING_SNAKE_CASE , row + 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , row - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col + 1 , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col - 1 , _SCREAMING_SNAKE_CASE ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
260
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class A ( UpperCAmelCase_ ): __UpperCAmelCase : List[str] = ( 'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.' 'It takes two arguments named `image` which should be the original image, and `label` which should be a text ' 'describing the elements what should be identified in the segmentation mask. The tool returns the mask.' ) __UpperCAmelCase : Tuple = 'CIDAS/clipseg-rd64-refined' __UpperCAmelCase : str = 'image_segmenter' __UpperCAmelCase : Any = CLIPSegForImageSegmentation __UpperCAmelCase : str = ['image', 'text'] __UpperCAmelCase : Tuple = ['image'] def __init__(self : Union[str, Any] , *__UpperCAmelCase : int , **__UpperCAmelCase : Dict ) -> Optional[int]: """simple docstring""" requires_backends(self , ["vision"] ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) def lowercase_ (self : List[str] , __UpperCAmelCase : "Image" , __UpperCAmelCase : str ) -> Tuple: """simple docstring""" return self.pre_processor(text=[label] , images=[image] , padding=__UpperCAmelCase , return_tensors="pt" ) def lowercase_ (self : Optional[int] , __UpperCAmelCase : str ) -> int: """simple docstring""" with torch.no_grad(): UpperCAmelCase__ = self.model(**__UpperCAmelCase ).logits return logits def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : int ) -> int: """simple docstring""" UpperCAmelCase__ = outputs.cpu().detach().numpy() UpperCAmelCase__ = 0 UpperCAmelCase__ = 1 return Image.fromarray((array * 2_5_5).astype(np.uinta ) )
65
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() ) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' _UpperCAmelCase = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue _UpperCAmelCase = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' ) _UpperCAmelCase = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' ) _UpperCAmelCase = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' ) _UpperCAmelCase = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' ) _UpperCAmelCase = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' ) _UpperCAmelCase = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' ) _UpperCAmelCase = key.replace('''image_encoder.module''' , '''flava.image_model''' ) _UpperCAmelCase = key.replace('''text_encoder.module''' , '''flava.text_model''' ) _UpperCAmelCase = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' ) _UpperCAmelCase = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' ) _UpperCAmelCase = key.replace('''text_projection''' , '''flava.text_projection''' ) _UpperCAmelCase = key.replace('''image_projection''' , '''flava.image_projection''' ) _UpperCAmelCase = value.float() for key, value in codebook_state_dict.items(): _UpperCAmelCase = value return upgrade @torch.no_grad() def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int]=None ): '''simple docstring''' if config_path is not None: _UpperCAmelCase = FlavaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = FlavaConfig() _UpperCAmelCase = FlavaForPreTraining(_SCREAMING_SNAKE_CASE ).eval() _UpperCAmelCase = convert_dalle_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , save_checkpoint=_SCREAMING_SNAKE_CASE ) if os.path.exists(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' ) else: _UpperCAmelCase = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='''cpu''' ) _UpperCAmelCase = upgrade_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) hf_model.load_state_dict(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = hf_model.state_dict() _UpperCAmelCase = count_parameters(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = count_parameters(_SCREAMING_SNAKE_CASE ) + count_parameters(_SCREAMING_SNAKE_CASE ) assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A : Dict = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") __A : Optional[Any] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
260
0
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self: str ) -> str: snake_case_ :Tuple = tempfile.mkdtemp() snake_case_ :Optional[Any] = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] snake_case_ :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) snake_case_ :List[str] = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], """image_std""": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } snake_case_ :List[str] = os.path.join(self.tmpdirname , snake_case ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(snake_case , snake_case ) def lowerCAmelCase_ ( self: Dict , **snake_case: Any ) -> List[Any]: return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case ) def lowerCAmelCase_ ( self: Optional[Any] , **snake_case: List[str] ) -> Union[str, Any]: return BertTokenizerFast.from_pretrained(self.tmpdirname , **snake_case ) def lowerCAmelCase_ ( self: Optional[Any] , **snake_case: Dict ) -> Any: return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **snake_case ) def lowerCAmelCase_ ( self: List[Any] ) -> Optional[Any]: shutil.rmtree(self.tmpdirname ) def lowerCAmelCase_ ( self: List[Any] ) -> int: snake_case_ :Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] snake_case_ :Dict = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCAmelCase_ ( self: List[Any] ) -> Any: snake_case_ :Optional[Any] = self.get_tokenizer() snake_case_ :Tuple = self.get_rust_tokenizer() snake_case_ :Optional[int] = self.get_image_processor() snake_case_ :List[Any] = AlignProcessor(tokenizer=snake_case , image_processor=snake_case ) processor_slow.save_pretrained(self.tmpdirname ) snake_case_ :Union[str, Any] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case ) snake_case_ :int = AlignProcessor(tokenizer=snake_case , image_processor=snake_case ) processor_fast.save_pretrained(self.tmpdirname ) snake_case_ :int = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , snake_case ) self.assertIsInstance(processor_fast.tokenizer , snake_case ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , snake_case ) self.assertIsInstance(processor_fast.image_processor , snake_case ) def lowerCAmelCase_ ( self: str ) -> int: snake_case_ :List[str] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) snake_case_ :List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) snake_case_ :Optional[Any] = self.get_image_processor(do_normalize=snake_case , padding_value=1.0 ) snake_case_ :int = AlignProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case ) def lowerCAmelCase_ ( self: Optional[Any] ) -> str: snake_case_ :Tuple = self.get_image_processor() snake_case_ :List[str] = self.get_tokenizer() snake_case_ :int = AlignProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ :Optional[Any] = self.prepare_image_inputs() snake_case_ :Optional[int] = image_processor(snake_case , return_tensors="""np""" ) snake_case_ :Optional[Any] = processor(images=snake_case , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCAmelCase_ ( self: Optional[int] ) -> List[Any]: snake_case_ :List[str] = self.get_image_processor() snake_case_ :List[Any] = self.get_tokenizer() snake_case_ :Dict = AlignProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ :Optional[Any] = """lower newer""" snake_case_ :int = processor(text=snake_case ) snake_case_ :Optional[Any] = tokenizer(snake_case , padding="""max_length""" , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCAmelCase_ ( self: Dict ) -> Union[str, Any]: snake_case_ :Any = self.get_image_processor() snake_case_ :Optional[int] = self.get_tokenizer() snake_case_ :Union[str, Any] = AlignProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ :Dict = """lower newer""" snake_case_ :Dict = self.prepare_image_inputs() snake_case_ :Dict = processor(text=snake_case , images=snake_case ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(snake_case ): processor() def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[Any]: snake_case_ :Optional[Any] = self.get_image_processor() snake_case_ :Optional[Any] = self.get_tokenizer() snake_case_ :str = AlignProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ :Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] snake_case_ :int = processor.batch_decode(snake_case ) snake_case_ :List[str] = tokenizer.batch_decode(snake_case ) self.assertListEqual(snake_case , snake_case ) def lowerCAmelCase_ ( self: List[Any] ) -> List[str]: snake_case_ :List[Any] = self.get_image_processor() snake_case_ :str = self.get_tokenizer() snake_case_ :Optional[Any] = AlignProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ :List[Any] = """lower newer""" snake_case_ :Dict = self.prepare_image_inputs() snake_case_ :int = processor(text=snake_case , images=snake_case ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
66
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowercase ( _SCREAMING_SNAKE_CASE : Features ): '''simple docstring''' _UpperCAmelCase = np.inf def set_batch_size(_SCREAMING_SNAKE_CASE : FeatureType ) -> None: nonlocal batch_size if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and feature.dtype == "binary": _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return None if batch_size is np.inf else batch_size class _a ( lowerCAmelCase): """simple docstring""" def __init__( self : Optional[Any] , __UpperCamelCase : NestedDataStructureLike[PathLike] , __UpperCamelCase : Optional[NamedSplit] = None , __UpperCamelCase : Optional[Features] = None , __UpperCamelCase : str = None , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[int] = None , **__UpperCamelCase : int , )->Union[str, Any]: super().__init__( __UpperCamelCase , split=__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase , streaming=__UpperCamelCase , num_proc=__UpperCamelCase , **__UpperCamelCase , ) _UpperCAmelCase = path_or_paths if isinstance(__UpperCamelCase , __UpperCamelCase ) else {self.split: path_or_paths} _UpperCAmelCase = _PACKAGED_DATASETS_MODULES['''parquet'''][1] _UpperCAmelCase = Parquet( cache_dir=__UpperCamelCase , data_files=__UpperCamelCase , features=__UpperCamelCase , hash=__UpperCamelCase , **__UpperCamelCase , ) def lowercase__ ( self : Union[str, Any] )->Dict: # Build iterable dataset if self.streaming: _UpperCAmelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None self.builder.download_and_prepare( download_config=__UpperCamelCase , download_mode=__UpperCamelCase , verification_mode=__UpperCamelCase , base_path=__UpperCamelCase , num_proc=self.num_proc , ) _UpperCAmelCase = self.builder.as_dataset( split=self.split , verification_mode=__UpperCamelCase , in_memory=self.keep_in_memory ) return dataset class _a : """simple docstring""" def __init__( self : Optional[int] , __UpperCamelCase : Dataset , __UpperCamelCase : Union[PathLike, BinaryIO] , __UpperCamelCase : Optional[int] = None , **__UpperCamelCase : Tuple , )->Optional[int]: _UpperCAmelCase = dataset _UpperCAmelCase = path_or_buf _UpperCAmelCase = batch_size or get_writer_batch_size(dataset.features ) _UpperCAmelCase = parquet_writer_kwargs def lowercase__ ( self : Optional[int] )->int: _UpperCAmelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , '''wb+''' ) as buffer: _UpperCAmelCase = self._write(file_obj=__UpperCamelCase , batch_size=__UpperCamelCase , **self.parquet_writer_kwargs ) else: _UpperCAmelCase = self._write(file_obj=self.path_or_buf , batch_size=__UpperCamelCase , **self.parquet_writer_kwargs ) return written def lowercase__ ( self : int , __UpperCamelCase : BinaryIO , __UpperCamelCase : int , **__UpperCamelCase : int )->int: _UpperCAmelCase = 0 _UpperCAmelCase = parquet_writer_kwargs.pop('''path_or_buf''' , __UpperCamelCase ) _UpperCAmelCase = self.dataset.features.arrow_schema _UpperCAmelCase = pq.ParquetWriter(__UpperCamelCase , schema=__UpperCamelCase , **__UpperCamelCase ) for offset in logging.tqdm( range(0 , len(self.dataset ) , __UpperCamelCase ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ): _UpperCAmelCase = query_table( table=self.dataset._data , key=slice(__UpperCamelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(__UpperCamelCase ) written += batch.nbytes writer.close() return written
260
0
'''simple docstring''' import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class a__ : def __init__( self : Tuple , a : Optional[int] , a : Optional[Any]=13 , a : List[Any]=64 , a : str=2 , a : int=3 , a : Union[str, Any]=True , a : List[str]=True , a : str=32 , a : List[Any]=5 , a : int=4 , a : Optional[int]=37 , a : Any="gelu" , a : List[Any]=0.1 , a : Tuple=0.1 , a : int=10 , a : Optional[int]=0.02 , a : int=[1, 16, 4, 4] , a : Dict=None , ): """simple docstring""" __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = image_size __lowerCamelCase = patch_size __lowerCamelCase = num_channels __lowerCamelCase = is_training __lowerCamelCase = use_labels __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = scope __lowerCamelCase = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size __lowerCamelCase = (self.image_size // 32) ** 2 __lowerCamelCase = num_patches + 1 def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" __lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" __lowerCamelCase = { '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, '''hidden_sizes''': [4, 8, 16, 32], '''num_groups''': 2, } return ViTHybridConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=a , ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : Any , a : Optional[int] , a : Tuple ): """simple docstring""" __lowerCamelCase = ViTHybridModel(config=a ) model.to(a ) model.eval() __lowerCamelCase = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any , a : Dict , a : Dict , a : Union[str, Any] ): """simple docstring""" __lowerCamelCase = self.type_sequence_label_size __lowerCamelCase = ViTHybridForImageClassification(a ) model.to(a ) model.eval() __lowerCamelCase = model(a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs __lowerCamelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class a__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): lowerCamelCase : Optional[Any] =(ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () lowerCamelCase : Tuple =( {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification} if is_torch_available() else {} ) lowerCamelCase : List[Any] =False lowerCamelCase : Tuple =False lowerCamelCase : List[str] =False def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" __lowerCamelCase = ViTHybridModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(a ) __lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase = [*signature.parameters.keys()] __lowerCamelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , a ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase = _config_zero_init(a ) for model_class in self.all_model_classes: __lowerCamelCase = model_class(config=a ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": __lowerCamelCase = [f"""{name}.{key}""" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = ViTHybridModel.from_pretrained(a ) self.assertIsNotNone(a ) def __lowerCAmelCase ( ) -> Optional[int]: __lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class a__ ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" __lowerCamelCase = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( a ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(images=a , return_tensors='''pt''' ).to(a ) # forward pass with torch.no_grad(): __lowerCamelCase = model(**a ) # verify the logits __lowerCamelCase = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , a ) __lowerCamelCase = torch.tensor([-1.90_90, -0.49_93, -0.23_89] ).to(a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) ) @slow @require_accelerate def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" __lowerCamelCase = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' ) __lowerCamelCase = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''' ) __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(images=a , return_tensors='''pt''' ) __lowerCamelCase = model(**a ) __lowerCamelCase = outputs.logits # model predicts one of the 1000 ImageNet classes __lowerCamelCase = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx] , '''tabby, tabby cat''' )
67
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str = " " ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = 0 for index, char in enumerate(_SCREAMING_SNAKE_CASE ): if char == separator: split_words.append(string[last_index:index] ) _UpperCAmelCase = index + 1 elif index + 1 == len(_SCREAMING_SNAKE_CASE ): split_words.append(string[last_index : index + 1] ) return split_words if __name__ == "__main__": from doctest import testmod testmod()
260
0
import unittest import torch from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel from diffusers.training_utils import set_seed from diffusers.utils.testing_utils import slow lowerCAmelCase__ = False class a__ ( unittest.TestCase ): """simple docstring""" def UpperCamelCase ( self , lowercase=32 ) -> Union[str, Any]: '''simple docstring''' set_seed(0 ) A__ = UNetaDModel(sample_size=lowercase , in_channels=3 , out_channels=3 ) A__ = torch.optim.SGD(model.parameters() , lr=0.0001 ) return model, optimizer @slow def UpperCamelCase ( self ) -> str: '''simple docstring''' A__ = "cpu" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable A__ = DDPMScheduler( num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="linear" , clip_sample=lowercase , ) A__ = DDIMScheduler( num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="linear" , clip_sample=lowercase , ) assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps # shared batches for DDPM and DDIM set_seed(0 ) A__ = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(lowercase ) for _ in range(4 )] A__ = [torch.randn((4, 3, 32, 32) ).to(lowercase ) for _ in range(4 )] A__ = [torch.randint(0 , 1000 , (4,) ).long().to(lowercase ) for _ in range(4 )] # train with a DDPM scheduler A__ , A__ = self.get_model_optimizer(resolution=32 ) model.train().to(lowercase ) for i in range(4 ): optimizer.zero_grad() A__ = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) A__ = model(lowercase , timesteps[i] ).sample A__ = torch.nn.functional.mse_loss(lowercase , noise[i] ) loss.backward() optimizer.step() del model, optimizer # recreate the model and optimizer, and retry with DDIM A__ , A__ = self.get_model_optimizer(resolution=32 ) model.train().to(lowercase ) for i in range(4 ): optimizer.zero_grad() A__ = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) A__ = model(lowercase , timesteps[i] ).sample A__ = torch.nn.functional.mse_loss(lowercase , noise[i] ) loss.backward() optimizer.step() del model, optimizer self.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-5 ) ) self.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-5 ) )
68
"""simple docstring""" import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def lowercase ( _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' _UpperCAmelCase = args.pruning_method _UpperCAmelCase = args.threshold _UpperCAmelCase = args.model_name_or_path.rstrip('''/''' ) _UpperCAmelCase = args.target_model_path print(f'Load fine-pruned model from {model_name_or_path}' ) _UpperCAmelCase = torch.load(os.path.join(_SCREAMING_SNAKE_CASE , '''pytorch_model.bin''' ) ) _UpperCAmelCase = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) elif "classifier" in name or "qa_output" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) elif "bias" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) else: if pruning_method == "magnitude": _UpperCAmelCase = MagnitudeBinarizer.apply(inputs=_SCREAMING_SNAKE_CASE , threshold=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "topK": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase = TopKBinarizer.apply(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase = ThresholdBinarizer.apply(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "l0": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase , _UpperCAmelCase = -0.1, 1.1 _UpperCAmelCase = torch.sigmoid(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = s * (r - l) + l _UpperCAmelCase = s_bar.clamp(min=0.0 , max=1.0 ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) else: raise ValueError('''Unknown pruning method''' ) if target_model_path is None: _UpperCAmelCase = os.path.join( os.path.dirname(_SCREAMING_SNAKE_CASE ) , f'bertarized_{os.path.basename(_SCREAMING_SNAKE_CASE )}' ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): shutil.copytree(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print(f'\nCreated folder {target_model_path}' ) torch.save(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , '''pytorch_model.bin''' ) ) print('''\nPruned model saved! See you later!''' ) if __name__ == "__main__": __A : Tuple = argparse.ArgumentParser() parser.add_argument( "--pruning_method", choices=["l0", "magnitude", "topK", "sigmoied_threshold"], type=str, required=True, help=( "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning," " sigmoied_threshold = Soft movement pruning)" ), ) parser.add_argument( "--threshold", type=float, required=False, help=( "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model." "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared." "Not needed for `l0`" ), ) parser.add_argument( "--model_name_or_path", type=str, required=True, help="Folder containing the model that was previously fine-pruned", ) parser.add_argument( "--target_model_path", default=None, type=str, required=False, help="Folder containing the model that was previously fine-pruned", ) __A : Optional[int] = parser.parse_args() main(args)
260
0
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {'''vocab_file''': '''spiece.model'''} __UpperCamelCase = { '''vocab_file''': { '''bert_for_seq_generation''': ( '''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model''' ), } } __UpperCamelCase = {'''bert_for_seq_generation''': 512} class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"] def __init__( self, lowerCAmelCase__, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<::::>", lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> None: snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, sp_model_kwargs=self.sp_model_kwargs, **lowerCAmelCase__, ) snake_case_ = vocab_file snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(lowerCAmelCase__) @property def a_ ( self) -> Tuple: return self.sp_model.get_piece_size() def a_ ( self) -> Optional[Any]: snake_case_ = {self.convert_ids_to_tokens(lowerCAmelCase__): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self) -> Any: snake_case_ = self.__dict__.copy() snake_case_ = None return state def __setstate__( self, lowerCAmelCase__) -> Optional[Any]: snake_case_ = d # for backward compatibility if not hasattr(self, 'sp_model_kwargs'): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def a_ ( self, lowerCAmelCase__) -> List[str]: return self.sp_model.encode(lowerCAmelCase__, out_type=lowerCAmelCase__) def a_ ( self, lowerCAmelCase__) -> Union[str, Any]: return self.sp_model.piece_to_id(lowerCAmelCase__) def a_ ( self, lowerCAmelCase__) -> Tuple: snake_case_ = self.sp_model.IdToPiece(lowerCAmelCase__) return token def a_ ( self, lowerCAmelCase__) -> Any: snake_case_ = [] snake_case_ = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowerCAmelCase__) + token snake_case_ = [] else: current_sub_tokens.append(lowerCAmelCase__) out_string += self.sp_model.decode(lowerCAmelCase__) return out_string.strip() def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]: if not os.path.isdir(lowerCAmelCase__): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return snake_case_ = os.path.join( lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, lowerCAmelCase__) elif not os.path.isfile(self.vocab_file): with open(lowerCAmelCase__, 'wb') as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase__) return (out_vocab_file,)
69
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) while cur > 1: # Find the maximum number in arr _UpperCAmelCase = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi _UpperCAmelCase = arr[mi::-1] + arr[mi + 1 : len(_SCREAMING_SNAKE_CASE )] # Reverse whole list _UpperCAmelCase = arr[cur - 1 :: -1] + arr[cur : len(_SCREAMING_SNAKE_CASE )] cur -= 1 return arr if __name__ == "__main__": __A : List[str] = input("Enter numbers separated by a comma:\n").strip() __A : List[Any] = [int(item) for item in user_input.split(",")] print(pancake_sort(unsorted))
260
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A__ : Dict =logging.get_logger(__name__) A__ : Tuple ={ '''microsoft/swin-tiny-patch4-window7-224''': ( '''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json''' ), # See all Swin models at https://huggingface.co/models?filter=swin } class UpperCAmelCase ( snake_case_ , snake_case_ ): _lowercase: Optional[Any] = '''swin''' _lowercase: Union[str, Any] = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : List[Any] , __snake_case : Dict=2_24 , __snake_case : List[str]=4 , __snake_case : Optional[Any]=3 , __snake_case : Optional[int]=96 , __snake_case : Tuple=[2, 2, 6, 2] , __snake_case : Optional[int]=[3, 6, 12, 24] , __snake_case : Optional[Any]=7 , __snake_case : List[Any]=4.0 , __snake_case : List[Any]=True , __snake_case : Tuple=0.0 , __snake_case : Optional[int]=0.0 , __snake_case : Union[str, Any]=0.1 , __snake_case : Dict="gelu" , __snake_case : int=False , __snake_case : Optional[Any]=0.02 , __snake_case : Optional[Any]=1E-5 , __snake_case : str=32 , __snake_case : Any=None , __snake_case : Tuple=None , **__snake_case : Tuple , ) -> int: super().__init__(**__snake_case ) _lowerCAmelCase = image_size _lowerCAmelCase = patch_size _lowerCAmelCase = num_channels _lowerCAmelCase = embed_dim _lowerCAmelCase = depths _lowerCAmelCase = len(__snake_case ) _lowerCAmelCase = num_heads _lowerCAmelCase = window_size _lowerCAmelCase = mlp_ratio _lowerCAmelCase = qkv_bias _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = drop_path_rate _lowerCAmelCase = hidden_act _lowerCAmelCase = use_absolute_embeddings _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = initializer_range _lowerCAmelCase = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCAmelCase = int(embed_dim * 2 ** (len(__snake_case ) - 1) ) _lowerCAmelCase = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__snake_case ) + 1 )] _lowerCAmelCase , _lowerCAmelCase = get_aligned_output_features_output_indices( out_features=__snake_case , out_indices=__snake_case , stage_names=self.stage_names ) class UpperCAmelCase ( snake_case_ ): _lowercase: Optional[Any] = version.parse('''1.11''' ) @property def lowercase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowercase__ ( self : Union[str, Any] ) -> float: return 1E-4
70
"""simple docstring""" from pathlib import Path import numpy as np from PIL import Image def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2989 * r + 0.5870 * g + 0.1140 * b def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' return (gray > 127) & (gray <= 255) def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' _UpperCAmelCase = np.zeros_like(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image _UpperCAmelCase = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): _UpperCAmelCase = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() _UpperCAmelCase = int(summation > 0 ) return output if __name__ == "__main__": # read original image __A : str = Path(__file__).resolve().parent / "image_data" / "lena.jpg" __A : str = np.array(Image.open(lena_path)) # kernel to be applied __A : List[Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) __A : Optional[Any] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image __A : Optional[Any] = Image.fromarray(output).convert("RGB") pil_img.save("result_dilation.png")
260
0
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class __A ( a ): """simple docstring""" UpperCamelCase__ : jnp.ndarray UpperCamelCase__ : jnp.ndarray class __A ( nn.Module ): """simple docstring""" UpperCamelCase__ : int UpperCamelCase__ : Tuple[int] =(1_6, 3_2, 9_6, 2_5_6) UpperCamelCase__ : jnp.dtype =jnp.floataa def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Optional[int] =nn.Conv( self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __UpperCamelCase : Tuple =[] for i in range(len(self.block_out_channels ) - 1 ): __UpperCamelCase : Dict =self.block_out_channels[i] __UpperCamelCase : Optional[int] =self.block_out_channels[i + 1] __UpperCamelCase : Optional[int] =nn.Conv( lowerCamelCase__ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(lowerCamelCase__ ) __UpperCamelCase : Optional[int] =nn.Conv( lowerCamelCase__ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(lowerCamelCase__ ) __UpperCamelCase : int =blocks __UpperCamelCase : str =nn.Conv( self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__( self , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : List[str] =self.conv_in(lowerCamelCase__ ) __UpperCamelCase : Dict =nn.silu(lowerCamelCase__ ) for block in self.blocks: __UpperCamelCase : str =block(lowerCamelCase__ ) __UpperCamelCase : Tuple =nn.silu(lowerCamelCase__ ) __UpperCamelCase : Any =self.conv_out(lowerCamelCase__ ) return embedding @flax_register_to_config class __A ( nn.Module , a , a ): """simple docstring""" UpperCamelCase__ : int =3_2 UpperCamelCase__ : int =4 UpperCamelCase__ : Tuple[str] =( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) UpperCamelCase__ : Union[bool, Tuple[bool]] =False UpperCamelCase__ : Tuple[int] =(3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0) UpperCamelCase__ : int =2 UpperCamelCase__ : Union[int, Tuple[int]] =8 UpperCamelCase__ : Optional[Union[int, Tuple[int]]] =None UpperCamelCase__ : int =1_2_8_0 UpperCamelCase__ : float =0.0 UpperCamelCase__ : bool =False UpperCamelCase__ : jnp.dtype =jnp.floataa UpperCamelCase__ : bool =True UpperCamelCase__ : int =0 UpperCamelCase__ : str ="rgb" UpperCamelCase__ : Tuple[int] =(1_6, 3_2, 9_6, 2_5_6) def __lowercase ( self , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : Optional[int] =(1, self.in_channels, self.sample_size, self.sample_size) __UpperCamelCase : List[Any] =jnp.zeros(lowerCamelCase__ , dtype=jnp.floataa ) __UpperCamelCase : Dict =jnp.ones((1,) , dtype=jnp.intaa ) __UpperCamelCase : Union[str, Any] =jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) __UpperCamelCase : int =(1, 3, self.sample_size * 8, self.sample_size * 8) __UpperCamelCase : Optional[int] =jnp.zeros(lowerCamelCase__ , dtype=jnp.floataa ) __UpperCamelCase , __UpperCamelCase : Union[str, Any] =jax.random.split(lowerCamelCase__ ) __UpperCamelCase : List[str] ={'params': params_rng, 'dropout': dropout_rng} return self.init(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )["params"] def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Any =self.block_out_channels __UpperCamelCase : List[Any] =block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. __UpperCamelCase : int =self.num_attention_heads or self.attention_head_dim # input __UpperCamelCase : Dict =nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time __UpperCamelCase : Dict =FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) __UpperCamelCase : Optional[int] =FlaxTimestepEmbedding(lowerCamelCase__ , dtype=self.dtype ) __UpperCamelCase : List[str] =FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , ) __UpperCamelCase : List[Any] =self.only_cross_attention if isinstance(lowerCamelCase__ , lowerCamelCase__ ): __UpperCamelCase : Optional[int] =(only_cross_attention,) * len(self.down_block_types ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ): __UpperCamelCase : Optional[int] =(num_attention_heads,) * len(self.down_block_types ) # down __UpperCamelCase : int =[] __UpperCamelCase : int =[] __UpperCamelCase : Dict =block_out_channels[0] __UpperCamelCase : List[Any] =nn.Conv( lowerCamelCase__ , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(lowerCamelCase__ ) for i, down_block_type in enumerate(self.down_block_types ): __UpperCamelCase : Union[str, Any] =output_channel __UpperCamelCase : str =block_out_channels[i] __UpperCamelCase : List[str] =i == len(lowerCamelCase__ ) - 1 if down_block_type == "CrossAttnDownBlock2D": __UpperCamelCase : str =FlaxCrossAttnDownBlockaD( in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , ) else: __UpperCamelCase : Any =FlaxDownBlockaD( in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(lowerCamelCase__ ) for _ in range(self.layers_per_block ): __UpperCamelCase : Union[str, Any] =nn.Conv( lowerCamelCase__ , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(lowerCamelCase__ ) if not is_final_block: __UpperCamelCase : int =nn.Conv( lowerCamelCase__ , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(lowerCamelCase__ ) __UpperCamelCase : Tuple =down_blocks __UpperCamelCase : Optional[Any] =controlnet_down_blocks # mid __UpperCamelCase : Union[str, Any] =block_out_channels[-1] __UpperCamelCase : Tuple =FlaxUNetMidBlockaDCrossAttn( in_channels=lowerCamelCase__ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , ) __UpperCamelCase : int =nn.Conv( lowerCamelCase__ , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1.0 , lowerCamelCase__ = True , lowerCamelCase__ = False , ): """simple docstring""" __UpperCamelCase : Dict =self.controlnet_conditioning_channel_order if channel_order == "bgr": __UpperCamelCase : Tuple =jnp.flip(lowerCamelCase__ , axis=1 ) # 1. time if not isinstance(lowerCamelCase__ , jnp.ndarray ): __UpperCamelCase : Optional[int] =jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(lowerCamelCase__ , jnp.ndarray ) and len(timesteps.shape ) == 0: __UpperCamelCase : Optional[Any] =timesteps.astype(dtype=jnp.floataa ) __UpperCamelCase : Optional[Any] =jnp.expand_dims(lowerCamelCase__ , 0 ) __UpperCamelCase : List[Any] =self.time_proj(lowerCamelCase__ ) __UpperCamelCase : Optional[int] =self.time_embedding(lowerCamelCase__ ) # 2. pre-process __UpperCamelCase : Any =jnp.transpose(lowerCamelCase__ , (0, 2, 3, 1) ) __UpperCamelCase : Optional[Any] =self.conv_in(lowerCamelCase__ ) __UpperCamelCase : int =jnp.transpose(lowerCamelCase__ , (0, 2, 3, 1) ) __UpperCamelCase : Any =self.controlnet_cond_embedding(lowerCamelCase__ ) sample += controlnet_cond # 3. down __UpperCamelCase : Optional[int] =(sample,) for down_block in self.down_blocks: if isinstance(lowerCamelCase__ , lowerCamelCase__ ): __UpperCamelCase , __UpperCamelCase : Union[str, Any] =down_block(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , deterministic=not train ) else: __UpperCamelCase , __UpperCamelCase : str =down_block(lowerCamelCase__ , lowerCamelCase__ , deterministic=not train ) down_block_res_samples += res_samples # 4. mid __UpperCamelCase : List[str] =self.mid_block(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , deterministic=not train ) # 5. contronet blocks __UpperCamelCase : Any =() for down_block_res_sample, controlnet_block in zip(lowerCamelCase__ , self.controlnet_down_blocks ): __UpperCamelCase : List[str] =controlnet_block(lowerCamelCase__ ) controlnet_down_block_res_samples += (down_block_res_sample,) __UpperCamelCase : str =controlnet_down_block_res_samples __UpperCamelCase : str =self.controlnet_mid_block(lowerCamelCase__ ) # 6. scaling __UpperCamelCase : Any =[sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=lowerCamelCase__ , mid_block_res_sample=lowerCamelCase__ )
71
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A : Tuple = logging.get_logger(__name__) __A : Optional[Any] = { "MIT/ast-finetuned-audioset-10-10-0.4593": ( "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json" ), } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """audio-spectrogram-transformer""" def __init__( self : int , __UpperCamelCase : Optional[Any]=7_6_8 , __UpperCamelCase : int=1_2 , __UpperCamelCase : List[Any]=1_2 , __UpperCamelCase : List[Any]=3_0_7_2 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : Union[str, Any]=0.0 , __UpperCamelCase : Dict=0.0 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Union[str, Any]=1e-12 , __UpperCamelCase : Optional[Any]=1_6 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : int=1_0 , __UpperCamelCase : Optional[int]=1_0 , __UpperCamelCase : str=1_0_2_4 , __UpperCamelCase : Optional[Any]=1_2_8 , **__UpperCamelCase : Any , )->Tuple: super().__init__(**__UpperCamelCase ) _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = patch_size _UpperCAmelCase = qkv_bias _UpperCAmelCase = frequency_stride _UpperCAmelCase = time_stride _UpperCAmelCase = max_length _UpperCAmelCase = num_mel_bins
260
0
"""simple docstring""" import unittest from diffusers import FlaxAutoencoderKL from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax from .test_modeling_common_flax import FlaxModelTesterMixin if is_flax_available(): import jax @require_flax class __snake_case ( _lowercase , unittest.TestCase): snake_case__ : str = FlaxAutoencoderKL @property def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Dict = 4 _lowerCamelCase : List[str] = 3 _lowerCamelCase : List[Any] = (3_2, 3_2) _lowerCamelCase : str = jax.random.PRNGKey(0 ) _lowerCamelCase : int = jax.random.uniform(__lowerCAmelCase , ((batch_size, num_channels) + sizes) ) return {"sample": image, "prng_key": prng_key} def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = { '''block_out_channels''': [3_2, 6_4], '''in_channels''': 3, '''out_channels''': 3, '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], '''latent_channels''': 4, } _lowerCamelCase : Tuple = self.dummy_input return init_dict, inputs_dict
72
"""simple docstring""" def lowercase ( ): '''simple docstring''' _UpperCAmelCase = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] _UpperCAmelCase = 6 _UpperCAmelCase = 1 _UpperCAmelCase = 1901 _UpperCAmelCase = 0 while year < 2001: day += 7 if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0): if day > days_per_month[month - 1] and month != 2: month += 1 _UpperCAmelCase = day - days_per_month[month - 2] elif day > 29 and month == 2: month += 1 _UpperCAmelCase = day - 29 else: if day > days_per_month[month - 1]: month += 1 _UpperCAmelCase = day - days_per_month[month - 2] if month > 12: year += 1 _UpperCAmelCase = 1 if year < 2001 and day == 1: sundays += 1 return sundays if __name__ == "__main__": print(solution())
260
0
import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets a ="""\ @article{hendrycksmath2021, title={Measuring Mathematical Problem Solving With the MATH Dataset}, author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt}, journal={arXiv preprint arXiv:2103.03874}, year={2021} } """ a ="""\ This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset. It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy. """ a =r""" Calculates accuracy after canonicalizing inputs. Args: predictions: list of predictions to score. Each prediction is a string that contains natural language and LaTex. references: list of reference for each prediction. Each reference is a string that contains natural language and LaTex. Returns: accuracy: accuracy after canonicalizing inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") Examples: >>> metric = datasets.load_metric(\"competition_math\") >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"]) >>> print(results) {'accuracy': 1.0} """ @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A_ ( datasets.Metric ): def lowerCAmelCase ( self : List[Any]): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { 'predictions': datasets.Value('string'), 'references': datasets.Value('string'), }) ,homepage='https://github.com/hendrycks/math' ,codebase_urls=['https://github.com/hendrycks/math'] ,) def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Tuple): __lowerCamelCase : str = 0.0 for i, j in zip(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__): n_correct += 1.0 if math_equivalence.is_equiv(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) else 0.0 __lowerCamelCase : Tuple = n_correct / len(SCREAMING_SNAKE_CASE__) return { "accuracy": accuracy, }
73
"""simple docstring""" from __future__ import annotations import math def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = str(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [n] for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ): list_nums.append(int(str_num[i:] ) ) list_nums.append(int(str_num[:-i] ) ) return list_nums def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if len(str(_SCREAMING_SNAKE_CASE ) ) > 3: if not is_prime(int(str(_SCREAMING_SNAKE_CASE )[-3:] ) ) or not is_prime(int(str(_SCREAMING_SNAKE_CASE )[:3] ) ): return False return True def lowercase ( _SCREAMING_SNAKE_CASE : int = 11 ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = 13 while len(_SCREAMING_SNAKE_CASE ) != count: if validate(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = list_truncated_nums(_SCREAMING_SNAKE_CASE ) if all(is_prime(_SCREAMING_SNAKE_CASE ) for i in list_nums ): list_truncated_primes.append(_SCREAMING_SNAKE_CASE ) num += 2 return list_truncated_primes def lowercase ( ): '''simple docstring''' return sum(compute_truncated_primes(11 ) ) if __name__ == "__main__": print(f'''{sum(compute_truncated_primes(11)) = }''')
260
0
"""simple docstring""" import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy _lowercase = logging.getLogger(__name__) _lowercase = '''pytorch_model.bin''' @dataclasses.dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str = dataclasses.field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models.'''} ) _lowerCamelCase: Optional[str] = dataclasses.field( default=_lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co.'''} , ) @dataclasses.dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the training data.'''} ) _lowerCamelCase: str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the data to predict on.'''} ) _lowerCamelCase: Optional[str] = dataclasses.field( default=_lowercase , metadata={'''help''': '''A csv or a json file containing the validation data.'''} ) _lowerCamelCase: Optional[str] = dataclasses.field( default=_lowercase , metadata={'''help''': '''The name of the task to train on.'''} , ) _lowerCamelCase: Optional[List[str]] = dataclasses.field( default=_lowercase , metadata={'''help''': '''The list of labels for the task.'''} ) @dataclasses.dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str = dataclasses.field( metadata={'''help''': '''The output directory where the model predictions and checkpoints will be written.'''} ) _lowerCamelCase: Optional[str] = dataclasses.field( default='''accuracy''' , metadata={'''help''': '''The evaluation metric used for the task.'''} ) _lowerCamelCase: Optional[str] = dataclasses.field( default='''no''' , metadata={ '''help''': '''The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]''' } , ) _lowerCamelCase: Optional[int] = dataclasses.field( default=10 , metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , ) _lowerCamelCase: Optional[float] = dataclasses.field( default=0.0 , metadata={ '''help''': '''How much the specified evaluation metric must improve to satisfy early stopping conditions.''' } , ) _lowerCamelCase: Optional[bool] = dataclasses.field( default=_lowercase , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the confidence score.'''} , ) _lowerCamelCase: Optional[bool] = dataclasses.field( default=_lowercase , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the validation performance.'''} , ) _lowerCamelCase: Optional[bool] = dataclasses.field( default=_lowercase , metadata={'''help''': '''Whether to fine-tune on labeled data after pseudo training.'''} , ) _lowerCamelCase: Optional[float] = dataclasses.field( default=0.0 , metadata={'''help''': '''Confidence threshold for pseudo-labeled data filtering.'''} , ) _lowerCamelCase: Optional[int] = dataclasses.field( default=100 , metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , ) _lowerCamelCase: Optional[int] = dataclasses.field( default=_lowercase , metadata={'''help''': '''Random seed for initialization.'''} , ) def _snake_case ( snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Dict ): A = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 ) if args.do_filter_by_confidence: A = dataset.filter(lambda snake_case__ : example["probability"] > args.confidence_threshold ) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 A = int(eval_result * len(snake_case__ ) ) print(snake_case__ ) A = dataset.sort('probability' , reverse=snake_case__ ) A = dataset.select(range(snake_case__ ) ) A = dataset.remove_columns(['label', 'probability'] ) A = dataset.rename_column('prediction' , 'label' ) A = dataset.map(lambda snake_case__ : {"label": idalabel[example["label"]]} ) A = dataset.shuffle(seed=args.seed ) A = os.path.join(snake_case__ , F'train_pseudo.{args.data_file_extension}' ) if args.data_file_extension == "csv": dataset.to_csv(snake_case__ , index=snake_case__ ) else: dataset.to_json(snake_case__ ) def _snake_case ( snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : str , **snake_case__ : Optional[int] ): A = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , ) logger.info(accelerator.state ) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() A = STModelArguments(model_name_or_path=snake_case__ ) A = STDataArguments(train_file=snake_case__ , infer_file=snake_case__ ) A = STTrainingArguments(output_dir=snake_case__ ) A = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(snake_case__ ).items(): setattr(snake_case__ , snake_case__ , snake_case__ ) for key, value in kwargs.items(): if hasattr(snake_case__ , snake_case__ ): setattr(snake_case__ , snake_case__ , snake_case__ ) # Sanity checks A = {} A = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None A = args.train_file A = args.infer_file if args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None A = args.eval_file for key in data_files: A = data_files[key].split('.' )[-1] assert extension in ["csv", "json"], F'`{key}_file` should be a csv or a json file.' if args.data_file_extension is None: A = extension else: assert extension == args.data_file_extension, F'`{key}_file` should be a {args.data_file_extension} file`.' assert ( args.eval_metric in datasets.list_metrics() ), F'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.' # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed ) logger.info('Creating the initial data directory for self-training...' ) A = F'{args.output_dir}/self-train_iter-{{}}'.format A = data_dir_format(0 ) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir , exist_ok=snake_case__ ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) accelerator.wait_for_everyone() A = None A = None A = 0 A = False # Show the progress bar A = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process ) # Self-train for iteration in range(0 , int(args.max_selftrain_iterations ) ): A = data_dir_format(snake_case__ ) assert os.path.exists(snake_case__ ) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 A = os.path.join(snake_case__ , 'stage-1' ) A = { 'accelerator': accelerator, 'model_name_or_path': args.model_name_or_path, 'cache_dir': args.cache_dir, 'do_train': True, 'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'], 'do_eval': True if args.eval_file is not None else False, 'eval_file': data_files['eval'], 'do_predict': True, 'infer_file': data_files['infer'], 'task_name': args.task_name, 'label_list': args.label_list, 'output_dir': current_output_dir, 'eval_metric': args.eval_metric, 'evaluation_strategy': args.evaluation_strategy, 'early_stopping_patience': args.early_stopping_patience, 'early_stopping_threshold': args.early_stopping_threshold, 'seed': args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(snake_case__ , snake_case__ ): arguments_dict.update({key: value} ) A = os.path.join(snake_case__ , 'best-checkpoint' , snake_case__ ) if os.path.exists(snake_case__ ): logger.info( 'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' , snake_case__ , snake_case__ , ) else: logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , snake_case__ ) finetune(**snake_case__ ) accelerator.wait_for_everyone() assert os.path.exists(snake_case__ ) logger.info('Self-training job completed: iteration: %d, stage: 1.' , snake_case__ ) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data A = os.path.join(snake_case__ , 'best-checkpoint' ) A = os.path.join(snake_case__ , 'stage-2' ) # Update arguments_dict A = model_path A = data_files['train'] A = current_output_dir A = os.path.join(snake_case__ , 'best-checkpoint' , snake_case__ ) if os.path.exists(snake_case__ ): logger.info( 'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , snake_case__ , snake_case__ , ) else: logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , snake_case__ ) finetune(**snake_case__ ) accelerator.wait_for_everyone() assert os.path.exists(snake_case__ ) logger.info('Self-training job completed: iteration: %d, stage: 2.' , snake_case__ ) A = iteration A = data_dir_format(iteration + 1 ) A = AutoConfig.from_pretrained(os.path.join(snake_case__ , 'best-checkpoint' ) ) A = config.idalabel A = os.path.join(snake_case__ , 'eval_results_best-checkpoint.json' ) A = os.path.join(snake_case__ , 'test_results_best-checkpoint.json' ) assert os.path.exists(snake_case__ ) with open(snake_case__ , 'r' ) as f: A = float(json.load(snake_case__ )[args.eval_metric] ) A = os.path.join(snake_case__ , 'infer_output_best-checkpoint.csv' ) assert os.path.exists(snake_case__ ) # Loading the dataset from local csv or json files. A = load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )['data'] A = load_dataset('csv' , data_files={'data': infer_output_file} )['data'] if accelerator.is_main_process: os.makedirs(snake_case__ , exist_ok=snake_case__ ) shutil.copy(snake_case__ , os.path.join(snake_case__ , F'eval_results_iter-{iteration}.json' ) ) if os.path.exists(snake_case__ ): shutil.copy(snake_case__ , os.path.join(snake_case__ , F'test_results_iter-{iteration}.json' ) ) create_pseudo_labeled_data(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) accelerator.wait_for_everyone() A = os.path.join(snake_case__ , F'train_pseudo.{args.data_file_extension}' ) if args.evaluation_strategy != IntervalStrategy.NO.value: A = eval_result if best_iteration is None: A = new_iteration A = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: A = new_iteration A = new_eval_result A = 0 else: if new_eval_result == best_eval_result: A = new_iteration A = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: A = True progress_bar.update(1 ) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info('Best iteration: %d' , snake_case__ ) logger.info('Best evaluation result: %s = %f' , args.eval_metric , snake_case__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(snake_case__ , F'eval_results_iter-{iteration}.json' ) , os.path.join(snake_case__ , 'eval_results_best-iteration.json' ) , ) else: # Assume that the last iteration is the best logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 ) logger.info('Best evaluation result: %s = %f' , args.eval_metric , snake_case__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(snake_case__ , F'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(snake_case__ , 'eval_results_best-iteration.json' ) , )
74
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __A : str = sys.version_info >= (3, 10) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Tuple=None ): '''simple docstring''' return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = field(default="""toto""" , metadata={"""help""": """help message"""}) @dataclass class _a : """simple docstring""" UpperCamelCase__ = False UpperCamelCase__ = True UpperCamelCase__ = None class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """titi""" UpperCamelCase__ = """toto""" class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """titi""" UpperCamelCase__ = """toto""" UpperCamelCase__ = 42 @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" def lowercase__ ( self : Tuple )->Optional[int]: _UpperCAmelCase = BasicEnum(self.foo ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" def lowercase__ ( self : List[str] )->List[Any]: _UpperCAmelCase = MixedTypeEnum(self.foo ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = None UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """help message"""}) UpperCamelCase__ = None UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[]) @dataclass class _a : """simple docstring""" UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[1, 2, 3]) UpperCamelCase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) UpperCamelCase__ = list_field(default=[0.1, 0.2, 0.3]) @dataclass class _a : """simple docstring""" UpperCamelCase__ = field() UpperCamelCase__ = field() UpperCamelCase__ = field() def lowercase__ ( self : int )->str: _UpperCAmelCase = BasicEnum(self.required_enum ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = field() UpperCamelCase__ = None UpperCamelCase__ = field(default="""toto""" , metadata={"""help""": """help message"""}) UpperCamelCase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) if is_python_no_less_than_3_10: @dataclass class _a : """simple docstring""" UpperCamelCase__ = False UpperCamelCase__ = True UpperCamelCase__ = None @dataclass class _a : """simple docstring""" UpperCamelCase__ = None UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """help message"""}) UpperCamelCase__ = None UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[]) class _a ( unittest.TestCase): """simple docstring""" def lowercase__ ( self : int , __UpperCamelCase : argparse.ArgumentParser , __UpperCamelCase : argparse.ArgumentParser )->Dict: self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): _UpperCAmelCase = {k: v for k, v in vars(__UpperCamelCase ).items() if k != '''container'''} _UpperCAmelCase = {k: v for k, v in vars(__UpperCamelCase ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , __UpperCamelCase ) and yy.get('''choices''' , __UpperCamelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](__UpperCamelCase ) , yy['''type'''](__UpperCamelCase ) ) del xx["type"], yy["type"] self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : int )->str: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--bar''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--baz''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--flag''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((_UpperCAmelCase) , ) = parser.parse_args_into_dataclasses(__UpperCamelCase , look_for_args_file=__UpperCamelCase ) self.assertFalse(example.flag ) def lowercase__ ( self : Dict )->List[Any]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=4_2 , type=__UpperCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCamelCase , help='''help message''' ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Tuple )->List[str]: _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) expected.add_argument('''--baz''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=__UpperCamelCase , dest='''baz''' ) expected.add_argument('''--opt''' , type=__UpperCamelCase , default=__UpperCamelCase ) _UpperCAmelCase = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(__UpperCamelCase ) for dataclass_type in dataclass_types: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) def lowercase__ ( self : Optional[Any] )->str: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 4_2] , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) _UpperCAmelCase = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) _UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 4_2 ) _UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowercase__ ( self : List[str] )->List[str]: @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 4_2) , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 4_2 ) def lowercase__ ( self : int )->int: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=__UpperCamelCase ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCamelCase ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual( __UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) _UpperCAmelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(__UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def lowercase__ ( self : Union[str, Any] )->Tuple: _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=__UpperCamelCase , type=__UpperCamelCase ) expected.add_argument('''--bar''' , default=__UpperCamelCase , type=__UpperCamelCase , help='''help message''' ) expected.add_argument('''--baz''' , default=__UpperCamelCase , type=__UpperCamelCase ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) _UpperCAmelCase = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(__UpperCamelCase ) for dataclass_type in dataclass_types: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , bar=__UpperCamelCase , baz=__UpperCamelCase , ces=[] , des=[] ) ) _UpperCAmelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(__UpperCamelCase , Namespace(foo=1_2 , bar=3.1_4 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def lowercase__ ( self : Any )->int: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--required_str''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCamelCase , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : str )->List[Any]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCamelCase , ) expected.add_argument('''--opt''' , type=__UpperCamelCase , default=__UpperCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCamelCase , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Optional[Any] )->Optional[int]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } _UpperCAmelCase = parser.parse_dict(__UpperCamelCase )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->List[str]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, '''extra''': 4_2, } self.assertRaises(__UpperCamelCase , parser.parse_dict , __UpperCamelCase , allow_extra_keys=__UpperCamelCase ) def lowercase__ ( self : Optional[Any] )->Optional[int]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(__UpperCamelCase , '''temp_json''' ) os.mkdir(__UpperCamelCase ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->Any: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(__UpperCamelCase , '''temp_yaml''' ) os.mkdir(__UpperCamelCase ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : int )->List[str]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase )
260
0
'''simple docstring''' import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging a_ : Optional[int] = logging.get_logger(__name__) a_ : List[str] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all LED models at https://huggingface.co/models?filter=LED a_ : Any = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } a_ : List[str] = { """allenai/led-base-16384""": 1_63_84, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def a_ ( ) -> Tuple: """simple docstring""" lowerCamelCase_ =( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) lowerCamelCase_ =bs[:] lowerCamelCase_ =0 for b in range(2**8 ): if b not in bs: bs.append(__snake_case ) cs.append(2**8 + n ) n += 1 lowerCamelCase_ =[chr(__snake_case ) for n in cs] return dict(zip(__snake_case , __snake_case ) ) def a_ ( __snake_case : List[Any] ) -> List[Any]: """simple docstring""" lowerCamelCase_ =set() lowerCamelCase_ =word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase_ =char return pairs class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Union[str, Any] =VOCAB_FILES_NAMES lowercase : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP lowercase : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : Any =['input_ids', 'attention_mask'] def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase="replace", lowerCAmelCase="<s>", lowerCAmelCase="</s>", lowerCAmelCase="</s>", lowerCAmelCase="<s>", lowerCAmelCase="<unk>", lowerCAmelCase="<pad>", lowerCAmelCase="<mask>", lowerCAmelCase=False, **lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =AddedToken(lowerCAmelCase, lstrip=lowerCAmelCase, rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase, lowerCAmelCase ) else bos_token lowerCamelCase_ =AddedToken(lowerCAmelCase, lstrip=lowerCAmelCase, rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase, lowerCAmelCase ) else eos_token lowerCamelCase_ =AddedToken(lowerCAmelCase, lstrip=lowerCAmelCase, rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase, lowerCAmelCase ) else sep_token lowerCamelCase_ =AddedToken(lowerCAmelCase, lstrip=lowerCAmelCase, rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase, lowerCAmelCase ) else cls_token lowerCamelCase_ =AddedToken(lowerCAmelCase, lstrip=lowerCAmelCase, rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase, lowerCAmelCase ) else unk_token lowerCamelCase_ =AddedToken(lowerCAmelCase, lstrip=lowerCAmelCase, rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase, lowerCAmelCase ) else pad_token # Mask token behave like a normal word, i.e. include the space before it lowerCamelCase_ =AddedToken(lowerCAmelCase, lstrip=lowerCAmelCase, rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase, lowerCAmelCase ) else mask_token super().__init__( errors=lowerCAmelCase, bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, unk_token=lowerCAmelCase, sep_token=lowerCAmelCase, cls_token=lowerCAmelCase, pad_token=lowerCAmelCase, mask_token=lowerCAmelCase, add_prefix_space=lowerCAmelCase, **lowerCAmelCase, ) with open(lowerCAmelCase, encoding='''utf-8''' ) as vocab_handle: lowerCamelCase_ =json.load(lowerCAmelCase ) lowerCamelCase_ ={v: k for k, v in self.encoder.items()} lowerCamelCase_ =errors # how to handle errors in decoding lowerCamelCase_ =bytes_to_unicode() lowerCamelCase_ ={v: k for k, v in self.byte_encoder.items()} with open(lowerCAmelCase, encoding='''utf-8''' ) as merges_handle: lowerCamelCase_ =merges_handle.read().split('''\n''' )[1:-1] lowerCamelCase_ =[tuple(merge.split() ) for merge in bpe_merges] lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) ) lowerCamelCase_ ={} lowerCamelCase_ =add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCamelCase_ =re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowercase__ ( self ): """simple docstring""" return len(self.encoder ) def lowercase__ ( self ): """simple docstring""" return dict(self.encoder, **self.added_tokens_encoder ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" if token in self.cache: return self.cache[token] lowerCamelCase_ =tuple(lowerCAmelCase ) lowerCamelCase_ =get_pairs(lowerCAmelCase ) if not pairs: return token while True: lowerCamelCase_ =min(lowerCAmelCase, key=lambda lowerCAmelCase : self.bpe_ranks.get(lowerCAmelCase, float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase_, lowerCamelCase_ =bigram lowerCamelCase_ =[] lowerCamelCase_ =0 while i < len(lowerCAmelCase ): try: lowerCamelCase_ =word.index(lowerCAmelCase, lowerCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCamelCase_ =j if word[i] == first and i < len(lowerCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCamelCase_ =tuple(lowerCAmelCase ) lowerCamelCase_ =new_word if len(lowerCAmelCase ) == 1: break else: lowerCamelCase_ =get_pairs(lowerCAmelCase ) lowerCamelCase_ =''' '''.join(lowerCAmelCase ) lowerCamelCase_ =word return word def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =[] for token in re.findall(self.pat, lowerCAmelCase ): lowerCamelCase_ =''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase ).split(''' ''' ) ) return bpe_tokens def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" return self.encoder.get(lowerCAmelCase, self.encoder.get(self.unk_token ) ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" return self.decoder.get(lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =''''''.join(lowerCAmelCase ) lowerCamelCase_ =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''', errors=self.errors ) return text def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ): """simple docstring""" if not os.path.isdir(lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase_ =os.path.join( lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCamelCase_ =os.path.join( lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(lowerCAmelCase, '''w''', encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=lowerCAmelCase, ensure_ascii=lowerCAmelCase ) + '''\n''' ) lowerCamelCase_ =0 with open(lowerCAmelCase, '''w''', encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda lowerCAmelCase : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) lowerCamelCase_ =token_index writer.write(''' '''.join(lowerCAmelCase ) + '''\n''' ) index += 1 return vocab_file, merge_file def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCamelCase_ =[self.cls_token_id] lowerCamelCase_ =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase, token_ids_a=lowerCAmelCase, already_has_special_tokens=lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase )) + [1] return [1] + ([0] * len(lowerCAmelCase )) + [1, 1] + ([0] * len(lowerCAmelCase )) + [1] def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ): """simple docstring""" lowerCamelCase_ =[self.sep_token_id] lowerCamelCase_ =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=False, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =kwargs.pop('''add_prefix_space''', self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase ) > 0 and not text[0].isspace()): lowerCamelCase_ =''' ''' + text return (text, kwargs) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = PaddingStrategy.DO_NOT_PAD, lowerCAmelCase = None, lowerCAmelCase = None, ): """simple docstring""" lowerCamelCase_ =super()._pad( encoded_inputs=lowerCAmelCase, max_length=lowerCAmelCase, padding_strategy=lowerCAmelCase, pad_to_multiple_of=lowerCAmelCase, return_attention_mask=lowerCAmelCase, ) # Load from model defaults if return_attention_mask is None: lowerCamelCase_ ='''attention_mask''' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowerCamelCase_ =encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. lowerCamelCase_ =len(encoded_inputs['''global_attention_mask'''] ) != len(lowerCAmelCase ) if needs_to_be_padded: lowerCamelCase_ =len(lowerCAmelCase ) - len(encoded_inputs['''global_attention_mask'''] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowerCamelCase_ =( encoded_inputs['''global_attention_mask'''] + [-1] * difference ) elif self.padding_side == "left": lowerCamelCase_ =[-1] * difference + encoded_inputs[ '''global_attention_mask''' ] else: raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) ) return encoded_inputs
75
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] _UpperCAmelCase = True for i in range(_SCREAMING_SNAKE_CASE ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: _UpperCAmelCase = True if a[i].islower(): _UpperCAmelCase = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
260
0
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) a_ = logging.get_logger(__name__) a_ = OrderedDict( [ ('align', 'EfficientNetImageProcessor'), ('beit', 'BeitImageProcessor'), ('bit', 'BitImageProcessor'), ('blip', 'BlipImageProcessor'), ('blip-2', 'BlipImageProcessor'), ('bridgetower', 'BridgeTowerImageProcessor'), ('chinese_clip', 'ChineseCLIPImageProcessor'), ('clip', 'CLIPImageProcessor'), ('clipseg', 'ViTImageProcessor'), ('conditional_detr', 'ConditionalDetrImageProcessor'), ('convnext', 'ConvNextImageProcessor'), ('convnextv2', 'ConvNextImageProcessor'), ('cvt', 'ConvNextImageProcessor'), ('data2vec-vision', 'BeitImageProcessor'), ('deformable_detr', 'DeformableDetrImageProcessor'), ('deit', 'DeiTImageProcessor'), ('deta', 'DetaImageProcessor'), ('detr', 'DetrImageProcessor'), ('dinat', 'ViTImageProcessor'), ('donut-swin', 'DonutImageProcessor'), ('dpt', 'DPTImageProcessor'), ('efficientformer', 'EfficientFormerImageProcessor'), ('efficientnet', 'EfficientNetImageProcessor'), ('flava', 'FlavaImageProcessor'), ('focalnet', 'BitImageProcessor'), ('git', 'CLIPImageProcessor'), ('glpn', 'GLPNImageProcessor'), ('groupvit', 'CLIPImageProcessor'), ('imagegpt', 'ImageGPTImageProcessor'), ('instructblip', 'BlipImageProcessor'), ('layoutlmv2', 'LayoutLMv2ImageProcessor'), ('layoutlmv3', 'LayoutLMv3ImageProcessor'), ('levit', 'LevitImageProcessor'), ('mask2former', 'Mask2FormerImageProcessor'), ('maskformer', 'MaskFormerImageProcessor'), ('mgp-str', 'ViTImageProcessor'), ('mobilenet_v1', 'MobileNetV1ImageProcessor'), ('mobilenet_v2', 'MobileNetV2ImageProcessor'), ('mobilevit', 'MobileViTImageProcessor'), ('mobilevit', 'MobileViTImageProcessor'), ('mobilevitv2', 'MobileViTImageProcessor'), ('nat', 'ViTImageProcessor'), ('oneformer', 'OneFormerImageProcessor'), ('owlvit', 'OwlViTImageProcessor'), ('perceiver', 'PerceiverImageProcessor'), ('pix2struct', 'Pix2StructImageProcessor'), ('poolformer', 'PoolFormerImageProcessor'), ('regnet', 'ConvNextImageProcessor'), ('resnet', 'ConvNextImageProcessor'), ('sam', 'SamImageProcessor'), ('segformer', 'SegformerImageProcessor'), ('swiftformer', 'ViTImageProcessor'), ('swin', 'ViTImageProcessor'), ('swin2sr', 'Swin2SRImageProcessor'), ('swinv2', 'ViTImageProcessor'), ('table-transformer', 'DetrImageProcessor'), ('timesformer', 'VideoMAEImageProcessor'), ('tvlt', 'TvltImageProcessor'), ('upernet', 'SegformerImageProcessor'), ('van', 'ConvNextImageProcessor'), ('videomae', 'VideoMAEImageProcessor'), ('vilt', 'ViltImageProcessor'), ('vit', 'ViTImageProcessor'), ('vit_hybrid', 'ViTHybridImageProcessor'), ('vit_mae', 'ViTImageProcessor'), ('vit_msn', 'ViTImageProcessor'), ('xclip', 'CLIPImageProcessor'), ('yolos', 'YolosImageProcessor'), ] ) a_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def lowerCamelCase__ ( _a): for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items(): if class_name in extractors: SCREAMING_SNAKE_CASE : str = model_type_to_module_name(_a) SCREAMING_SNAKE_CASE : List[str] = importlib.import_module(f".{module_name}" , "transformers.models") try: return getattr(_a , _a) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(_a , "__name__" , _a) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. SCREAMING_SNAKE_CASE : Optional[int] = importlib.import_module("transformers") if hasattr(_a , _a): return getattr(_a , _a) return None def lowerCamelCase__ ( _a , _a = None , _a = False , _a = False , _a = None , _a = None , _a = None , _a = False , **_a , ): SCREAMING_SNAKE_CASE : Any = get_file_from_repo( _a , _a , cache_dir=_a , force_download=_a , resume_download=_a , proxies=_a , use_auth_token=_a , revision=_a , local_files_only=_a , ) if resolved_config_file is None: logger.info( "Could not locate the image processor configuration file, will try to use the model config instead.") return {} with open(_a , encoding="utf-8") as reader: return json.load(_a) class _UpperCamelCase : '''simple docstring''' def __init__( self : Optional[int] ) -> Optional[Any]: """simple docstring""" raise EnvironmentError( "AutoImageProcessor is designed to be instantiated " "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." ) @classmethod @replace_list_option_in_docstrings(a ) def __UpperCamelCase ( cls : Optional[int] , a : List[str] , **a : List[Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = kwargs.pop("config" , a ) SCREAMING_SNAKE_CASE : Any = kwargs.pop("trust_remote_code" , a ) SCREAMING_SNAKE_CASE : Optional[int] = True SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = ImageProcessingMixin.get_image_processor_dict(a , **a ) SCREAMING_SNAKE_CASE : Any = config_dict.get("image_processor_type" , a ) SCREAMING_SNAKE_CASE : Tuple = None if "AutoImageProcessor" in config_dict.get("auto_map" , {} ): SCREAMING_SNAKE_CASE : str = config_dict["auto_map"]["AutoImageProcessor"] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. if image_processor_class is None and image_processor_auto_map is None: SCREAMING_SNAKE_CASE : Optional[Any] = config_dict.pop("feature_extractor_type" , a ) if feature_extractor_class is not None: logger.warning( "Could not find image processor class in the image processor config or the model config. Loading" " based on pattern matching with the model's feature extractor configuration." ) SCREAMING_SNAKE_CASE : Tuple = feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor" ) if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ): SCREAMING_SNAKE_CASE : Union[str, Any] = config_dict["auto_map"]["AutoFeatureExtractor"] SCREAMING_SNAKE_CASE : List[Any] = feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor" ) logger.warning( "Could not find image processor auto map in the image processor config or the model config." " Loading based on pattern matching with the model's feature extractor configuration." ) # If we don't find the image processor class in the image processor config, let's try the model config. if image_processor_class is None and image_processor_auto_map is None: if not isinstance(a , a ): SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(a , **a ) # It could be in `config.image_processor_type`` SCREAMING_SNAKE_CASE : Optional[int] = getattr(a , "image_processor_type" , a ) if hasattr(a , "auto_map" ) and "AutoImageProcessor" in config.auto_map: SCREAMING_SNAKE_CASE : Union[str, Any] = config.auto_map["AutoImageProcessor"] if image_processor_class is not None: SCREAMING_SNAKE_CASE : Dict = image_processor_class_from_name(a ) SCREAMING_SNAKE_CASE : Tuple = image_processor_auto_map is not None SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor_class is not None or type(a ) in IMAGE_PROCESSOR_MAPPING SCREAMING_SNAKE_CASE : Optional[int] = resolve_trust_remote_code( a , a , a , a ) if has_remote_code and trust_remote_code: SCREAMING_SNAKE_CASE : int = get_class_from_dynamic_module( a , a , **a ) SCREAMING_SNAKE_CASE : Dict = kwargs.pop("code_revision" , a ) if os.path.isdir(a ): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(a , **a ) elif image_processor_class is not None: return image_processor_class.from_dict(a , **a ) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(a ) in IMAGE_PROCESSOR_MAPPING: SCREAMING_SNAKE_CASE : Optional[int] = IMAGE_PROCESSOR_MAPPING[type(a )] return image_processor_class.from_dict(a , **a ) raise ValueError( F"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a " F"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following " F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}" ) @staticmethod def __UpperCamelCase ( a : Dict , a : List[str] ) -> Optional[Any]: """simple docstring""" IMAGE_PROCESSOR_MAPPING.register(a , a )
76
"""simple docstring""" import random def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' _UpperCAmelCase = a[left_index] _UpperCAmelCase = left_index + 1 for j in range(left_index + 1 , _SCREAMING_SNAKE_CASE ): if a[j] < pivot: _UpperCAmelCase , _UpperCAmelCase = a[i], a[j] i += 1 _UpperCAmelCase , _UpperCAmelCase = a[i - 1], a[left_index] return i - 1 def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' if left < right: _UpperCAmelCase = random.randint(_SCREAMING_SNAKE_CASE , right - 1 ) _UpperCAmelCase , _UpperCAmelCase = ( a[left], a[pivot], ) # switches the pivot with the left most bound _UpperCAmelCase = partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) quick_sort_random( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # recursive quicksort to the left of the pivot point quick_sort_random( _SCREAMING_SNAKE_CASE , pivot_index + 1 , _SCREAMING_SNAKE_CASE ) # recursive quicksort to the right of the pivot point def lowercase ( ): '''simple docstring''' _UpperCAmelCase = input('''Enter numbers separated by a comma:\n''' ).strip() _UpperCAmelCase = [int(_SCREAMING_SNAKE_CASE ) for item in user_input.split(''',''' )] quick_sort_random(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) ) print(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
260
0
"""simple docstring""" import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion" ) _UpperCamelCase : Tuple = None _UpperCamelCase : str = { "7B": 1_10_08, "13B": 1_38_24, "30B": 1_79_20, "65B": 2_20_16, "70B": 2_86_72, } _UpperCamelCase : int = { "7B": 1, "7Bf": 1, "13B": 2, "13Bf": 2, "30B": 4, "65B": 8, "70B": 8, "70Bf": 8, } def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple=1 , _lowerCAmelCase : Union[str, Any]=256 ): '''simple docstring''' return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def a_ ( _lowerCAmelCase : int ): '''simple docstring''' with open(_lowerCAmelCase , 'r' ) as f: return json.load(_lowerCAmelCase ) def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ): '''simple docstring''' with open(_lowerCAmelCase , 'w' ) as f: json.dump(_lowerCAmelCase , _lowerCAmelCase ) def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict=True ): '''simple docstring''' os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase ) lowercase__ : str = os.path.join(_lowerCAmelCase , 'tmp' ) os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase ) lowercase__ : List[str] = read_json(os.path.join(_lowerCAmelCase , 'params.json' ) ) lowercase__ : Any = NUM_SHARDS[model_size] lowercase__ : str = params['n_layers'] lowercase__ : List[Any] = params['n_heads'] lowercase__ : Tuple = n_heads // num_shards lowercase__ : Tuple = params['dim'] lowercase__ : List[str] = dim // n_heads lowercase__ : Dict = 1_0_0_0_0.0 lowercase__ : Any = 1.0 / (base ** (torch.arange(0 , _lowerCAmelCase , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: lowercase__ : Any = params['n_kv_heads'] # for GQA / MQA lowercase__ : Tuple = n_heads_per_shard // num_key_value_heads lowercase__ : Optional[Any] = dim // num_key_value_heads else: # compatibility with other checkpoints lowercase__ : Dict = n_heads lowercase__ : Tuple = n_heads_per_shard lowercase__ : int = dim # permute for sliced rotary def permute(_lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=n_heads , _lowerCAmelCase : Any=dim , _lowerCAmelCase : List[Any]=dim ): return w.view(_lowerCAmelCase , dima // n_heads // 2 , 2 , _lowerCAmelCase ).transpose(1 , 2 ).reshape(_lowerCAmelCase , _lowerCAmelCase ) print(f"""Fetching all parameters from the checkpoint at {input_base_path}.""" ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) lowercase__ : Tuple = torch.load(os.path.join(_lowerCAmelCase , 'consolidated.00.pth' ) , map_location='cpu' ) else: # Sharded lowercase__ : int = [ torch.load(os.path.join(_lowerCAmelCase , f"""consolidated.{i:02d}.pth""" ) , map_location='cpu' ) for i in range(_lowerCAmelCase ) ] lowercase__ : List[Any] = 0 lowercase__ : Dict = {'weight_map': {}} for layer_i in range(_lowerCAmelCase ): lowercase__ : List[Any] = f"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin""" if model_size == "7B": # Unsharded lowercase__ : Any = { f"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute( loaded[f"""layers.{layer_i}.attention.wq.weight"""] ), f"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute( loaded[f"""layers.{layer_i}.attention.wk.weight"""] ), f"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[f"""layers.{layer_i}.attention.wv.weight"""], f"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[f"""layers.{layer_i}.attention.wo.weight"""], f"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w1.weight"""], f"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w2.weight"""], f"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w3.weight"""], f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[f"""layers.{layer_i}.attention_norm.weight"""], f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[f"""layers.{layer_i}.ffn_norm.weight"""], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. lowercase__ : Optional[int] = { f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][ f"""layers.{layer_i}.attention_norm.weight""" ].clone(), f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][ f"""layers.{layer_i}.ffn_norm.weight""" ].clone(), } lowercase__ : Optional[Any] = permute( torch.cat( [ loaded[i][f"""layers.{layer_i}.attention.wq.weight"""].view(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for i in range(_lowerCAmelCase ) ] , dim=0 , ).reshape(_lowerCAmelCase , _lowerCAmelCase ) ) lowercase__ : List[str] = permute( torch.cat( [ loaded[i][f"""layers.{layer_i}.attention.wk.weight"""].view( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for i in range(_lowerCAmelCase ) ] , dim=0 , ).reshape(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) lowercase__ : Optional[int] = torch.cat( [ loaded[i][f"""layers.{layer_i}.attention.wv.weight"""].view( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for i in range(_lowerCAmelCase ) ] , dim=0 , ).reshape(_lowerCAmelCase , _lowerCAmelCase ) lowercase__ : Tuple = torch.cat( [loaded[i][f"""layers.{layer_i}.attention.wo.weight"""] for i in range(_lowerCAmelCase )] , dim=1 ) lowercase__ : Union[str, Any] = torch.cat( [loaded[i][f"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(_lowerCAmelCase )] , dim=0 ) lowercase__ : Optional[int] = torch.cat( [loaded[i][f"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(_lowerCAmelCase )] , dim=1 ) lowercase__ : Any = torch.cat( [loaded[i][f"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(_lowerCAmelCase )] , dim=0 ) lowercase__ : Optional[Any] = inv_freq for k, v in state_dict.items(): lowercase__ : Tuple = filename param_count += v.numel() torch.save(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) ) lowercase__ : Dict = f"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin""" if model_size == "7B": # Unsharded lowercase__ : Tuple = { 'model.embed_tokens.weight': loaded['tok_embeddings.weight'], 'model.norm.weight': loaded['norm.weight'], 'lm_head.weight': loaded['output.weight'], } else: lowercase__ : int = { 'model.norm.weight': loaded[0]['norm.weight'], 'model.embed_tokens.weight': torch.cat( [loaded[i]['tok_embeddings.weight'] for i in range(_lowerCAmelCase )] , dim=1 ), 'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_lowerCAmelCase )] , dim=0 ), } for k, v in state_dict.items(): lowercase__ : Optional[Any] = filename param_count += v.numel() torch.save(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) ) # Write configs lowercase__ : int = {'total_size': param_count * 2} write_json(_lowerCAmelCase , os.path.join(_lowerCAmelCase , 'pytorch_model.bin.index.json' ) ) lowercase__ : str = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1 lowercase__ : Optional[Any] = params['multiple_of'] if 'multiple_of' in params else 256 lowercase__ : Tuple = LlamaConfig( hidden_size=_lowerCAmelCase , intermediate_size=compute_intermediate_size(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_lowerCAmelCase , ) config.save_pretrained(_lowerCAmelCase ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print('Loading the checkpoint in a Llama model.' ) lowercase__ : Dict = LlamaForCausalLM.from_pretrained(_lowerCAmelCase , torch_dtype=torch.floataa , low_cpu_mem_usage=_lowerCAmelCase ) # Avoid saving this as part of the config. del model.config._name_or_path print('Saving in the Transformers format.' ) model.save_pretrained(_lowerCAmelCase , safe_serialization=_lowerCAmelCase ) shutil.rmtree(_lowerCAmelCase ) def a_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ): '''simple docstring''' lowercase__ : Optional[Any] = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(f"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""" ) lowercase__ : Tuple = tokenizer_class(_lowerCAmelCase ) tokenizer.save_pretrained(_lowerCAmelCase ) def a_ ( ): '''simple docstring''' lowercase__ : Dict = argparse.ArgumentParser() parser.add_argument( '--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , ) parser.add_argument( '--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , ) parser.add_argument( '--output_dir' , help='Location to write HF model and tokenizer' , ) parser.add_argument('--safe_serialization' , type=_lowerCAmelCase , help='Whether or not to save using `safetensors`.' ) lowercase__ : str = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) lowercase__ : str = os.path.join(args.input_dir , 'tokenizer.model' ) write_tokenizer(args.output_dir , _lowerCAmelCase ) if __name__ == "__main__": main()
77
"""simple docstring""" import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging __A : Union[str, Any] = "\\n\n" __A : Any = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n" __A : List[str] = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _a ( datasets.Metric): """simple docstring""" def lowercase__ ( self : List[Any] )->Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''input_texts''': datasets.Value('''string''' ), } ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , ) def lowercase__ ( self : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : int = 1_6 , __UpperCamelCase : bool = True , __UpperCamelCase : List[Any]=None )->Any: if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." if device == "gpu": _UpperCAmelCase = '''cuda''' else: _UpperCAmelCase = '''cuda''' if torch.cuda.is_available() else '''cpu''' _UpperCAmelCase = AutoModelForCausalLM.from_pretrained(__UpperCamelCase ) _UpperCAmelCase = model.to(__UpperCamelCase ) _UpperCAmelCase = AutoTokenizer.from_pretrained(__UpperCamelCase ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: _UpperCAmelCase = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(__UpperCamelCase ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" _UpperCAmelCase = model.config.max_length - 1 else: _UpperCAmelCase = model.config.max_length _UpperCAmelCase = tokenizer( __UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors='''pt''' , return_attention_mask=__UpperCamelCase , ).to(__UpperCamelCase ) _UpperCAmelCase = encodings['''input_ids'''] _UpperCAmelCase = encodings['''attention_mask'''] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." _UpperCAmelCase = [] _UpperCAmelCase = CrossEntropyLoss(reduction='''none''' ) for start_index in logging.tqdm(range(0 , len(__UpperCamelCase ) , __UpperCamelCase ) ): _UpperCAmelCase = min(start_index + batch_size , len(__UpperCamelCase ) ) _UpperCAmelCase = encoded_texts[start_index:end_index] _UpperCAmelCase = attn_masks[start_index:end_index] if add_start_token: _UpperCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__UpperCamelCase ) _UpperCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) _UpperCAmelCase = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(__UpperCamelCase ), attn_mask] , dim=1 ) _UpperCAmelCase = encoded_batch with torch.no_grad(): _UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase ).logits _UpperCAmelCase = out_logits[..., :-1, :].contiguous() _UpperCAmelCase = labels[..., 1:].contiguous() _UpperCAmelCase = attn_mask[..., 1:].contiguous() _UpperCAmelCase = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , __UpperCamelCase ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(__UpperCamelCase )}
260
0
"""simple docstring""" from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent snake_case_ = {"""UserAgent""": UserAgent().random} def _lowerCAmelCase ( lowercase_ ): UpperCAmelCase = script.contents[0] UpperCAmelCase = json.loads(data[data.find('{"config"' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class A_ : """simple docstring""" def __init__( self :int , lowercase_ :str ) -> Dict: UpperCAmelCase = f"""https://www.instagram.com/{username}/""" UpperCAmelCase = self.get_json() def UpperCAmelCase__ ( self :Tuple ) -> dict: UpperCAmelCase = requests.get(self.url , headers=lowercase_ ).text UpperCAmelCase = BeautifulSoup(lowercase_ , 'html.parser' ).find_all('script' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self :Dict ) -> str: return f"""{self.__class__.__name__}('{self.username}')""" def __str__( self :int ) -> str: return f"""{self.fullname} ({self.username}) is {self.biography}""" @property def UpperCAmelCase__ ( self :Optional[Any] ) -> str: return self.user_data["username"] @property def UpperCAmelCase__ ( self :str ) -> str: return self.user_data["full_name"] @property def UpperCAmelCase__ ( self :Tuple ) -> str: return self.user_data["biography"] @property def UpperCAmelCase__ ( self :Optional[Any] ) -> str: return self.user_data["business_email"] @property def UpperCAmelCase__ ( self :Optional[int] ) -> str: return self.user_data["external_url"] @property def UpperCAmelCase__ ( self :Optional[int] ) -> int: return self.user_data["edge_followed_by"]["count"] @property def UpperCAmelCase__ ( self :int ) -> int: return self.user_data["edge_follow"]["count"] @property def UpperCAmelCase__ ( self :List[str] ) -> int: return self.user_data["edge_owner_to_timeline_media"]["count"] @property def UpperCAmelCase__ ( self :Dict ) -> str: return self.user_data["profile_pic_url_hd"] @property def UpperCAmelCase__ ( self :Any ) -> bool: return self.user_data["is_verified"] @property def UpperCAmelCase__ ( self :Dict ) -> bool: return self.user_data["is_private"] def _lowerCAmelCase ( lowercase_ = "github" ): import os if os.environ.get('CI' ): return # test failing on GitHub Actions UpperCAmelCase = InstagramUser(lowercase_ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , lowercase_ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 150 assert instagram_user.number_of_followers > 120000 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('https://instagram.' ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() snake_case_ = InstagramUser("""github""") print(instagram_user) print(f'''{instagram_user.number_of_posts = }''') print(f'''{instagram_user.number_of_followers = }''') print(f'''{instagram_user.number_of_followings = }''') print(f'''{instagram_user.email = }''') print(f'''{instagram_user.website = }''') print(f'''{instagram_user.profile_picture_url = }''') print(f'''{instagram_user.is_verified = }''') print(f'''{instagram_user.is_private = }''')
78
"""simple docstring""" import pytest import datasets # Import fixture modules as plugins __A : int = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"] def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' for item in items: if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ): continue item.add_marker(pytest.mark.unit ) def lowercase ( _SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' _UpperCAmelCase = tmp_path_factory.getbasetemp() / '''cache''' _UpperCAmelCase = test_hf_cache_home / '''datasets''' _UpperCAmelCase = test_hf_cache_home / '''metrics''' _UpperCAmelCase = test_hf_cache_home / '''modules''' monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = test_hf_datasets_cache / '''downloads''' monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = test_hf_datasets_cache / '''downloads''' / '''extracted''' monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_SCREAMING_SNAKE_CASE ) ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE , scope='''session''' ) def lowercase ( ): '''simple docstring''' datasets.disable_progress_bar() @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , _SCREAMING_SNAKE_CASE ) @pytest.fixture def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , _SCREAMING_SNAKE_CASE )
260
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase_ = { '''configuration_longformer''': [ '''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongformerConfig''', '''LongformerOnnxConfig''', ], '''tokenization_longformer''': ['''LongformerTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ['''LongformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LongformerForMaskedLM''', '''LongformerForMultipleChoice''', '''LongformerForQuestionAnswering''', '''LongformerForSequenceClassification''', '''LongformerForTokenClassification''', '''LongformerModel''', '''LongformerPreTrainedModel''', '''LongformerSelfAttention''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLongformerForMaskedLM''', '''TFLongformerForMultipleChoice''', '''TFLongformerForQuestionAnswering''', '''TFLongformerForSequenceClassification''', '''TFLongformerForTokenClassification''', '''TFLongformerModel''', '''TFLongformerPreTrainedModel''', '''TFLongformerSelfAttention''', ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
79
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : list ): '''simple docstring''' if len(_SCREAMING_SNAKE_CASE ) <= 1: return lst _UpperCAmelCase = 1 while i < len(_SCREAMING_SNAKE_CASE ): if lst[i - 1] <= lst[i]: i += 1 else: _UpperCAmelCase , _UpperCAmelCase = lst[i], lst[i - 1] i -= 1 if i == 0: _UpperCAmelCase = 1 return lst if __name__ == "__main__": __A : Dict = input("Enter numbers separated by a comma:\n").strip() __A : List[Any] = [int(item) for item in user_input.split(",")] print(gnome_sort(unsorted))
260
0
'''simple docstring''' from math import ceil def _UpperCamelCase ( __A , __A ) -> Tuple: '''simple docstring''' UpperCamelCase__ = list(range(0 , __A ) ) UpperCamelCase__ = [item for sublist in list(device_map.values() ) for item in sublist] # Duplicate check UpperCamelCase__ = [] for i in device_map_blocks: if device_map_blocks.count(__A ) > 1 and i not in duplicate_blocks: duplicate_blocks.append(__A ) # Missing blocks UpperCamelCase__ = [i for i in blocks if i not in device_map_blocks] UpperCamelCase__ = [i for i in device_map_blocks if i not in blocks] if len(__A ) != 0: raise ValueError( "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device." " These attention blocks were specified more than once: " + str(__A ) ) if len(__A ) != 0: raise ValueError( "There are attention blocks for this model that are not specified in the device_map. Add these attention " "blocks to a device on the device_map: " + str(__A ) ) if len(__A ) != 0: raise ValueError( "The device_map contains more attention blocks than this model has. Remove these from the device_map:" + str(__A ) ) def _UpperCamelCase ( __A , __A ) -> int: '''simple docstring''' UpperCamelCase__ = list(range(__A ) ) UpperCamelCase__ = int(ceil(n_layers / len(__A ) ) ) UpperCamelCase__ = [layers[i : i + n_blocks] for i in range(0 , __A , __A )] return dict(zip(__A , __A ) )
80
"""simple docstring""" import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version __A : int = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt") def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : int = 1_6000 ): '''simple docstring''' _UpperCAmelCase = int(round(sample_rate * max_length ) ) if len(_SCREAMING_SNAKE_CASE ) <= sample_length: return wav _UpperCAmelCase = randint(0 , len(_SCREAMING_SNAKE_CASE ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class _a : """simple docstring""" UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """Name of a dataset from the datasets package"""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """A file containing the training audio paths and labels."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """A file containing the validation audio paths and labels."""}) UpperCamelCase__ = field( default="""train""" , metadata={ """help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'""" } , ) UpperCamelCase__ = field( default="""validation""" , metadata={ """help""": ( """The name of the training data set split to use (via the datasets library). Defaults to 'validation'""" ) } , ) UpperCamelCase__ = field( default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , ) UpperCamelCase__ = field( default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. Defaults to 'label'"""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) UpperCamelCase__ = field( default=20 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = field( default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""}) UpperCamelCase__ = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Name or path of preprocessor config."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , ) def lowercase__ ( self : Optional[Any] )->int: if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( '''The argument `--freeze_feature_extractor` is deprecated and ''' '''will be removed in a future version. Use `--freeze_feature_encoder`''' '''instead. Setting `freeze_feature_encoder==True`.''' , __UpperCamelCase , ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( '''The argument `--freeze_feature_extractor` is deprecated and ''' '''should not be used in combination with `--freeze_feature_encoder`.''' '''Only make use of `--freeze_feature_encoder`.''' ) def lowercase ( ): '''simple docstring''' _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_audio_classification''' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(_SCREAMING_SNAKE_CASE ) transformers.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} ' + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. _UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. ' '''Use --overwrite_output_dir to train from scratch.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset and prepare it for the audio classification task. _UpperCAmelCase = DatasetDict() _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( f'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. ' '''Make sure to set `--audio_column_name` to the correct audio column - one of ''' f'{", ".join(raw_datasets["train"].column_names )}.' ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( f'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. ' '''Make sure to set `--label_column_name` to the correct text column - one of ''' f'{", ".join(raw_datasets["train"].column_names )}.' ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy _UpperCAmelCase = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. _UpperCAmelCase = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) _UpperCAmelCase = feature_extractor.model_input_names[0] def train_transforms(_SCREAMING_SNAKE_CASE : Tuple ): _UpperCAmelCase = [] for audio in batch[data_args.audio_column_name]: _UpperCAmelCase = random_subsample( audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=feature_extractor.sampling_rate ) _UpperCAmelCase = {model_input_name: inputs.get(_SCREAMING_SNAKE_CASE )} _UpperCAmelCase = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(_SCREAMING_SNAKE_CASE : Optional[int] ): _UpperCAmelCase = [audio['''array'''] for audio in batch[data_args.audio_column_name]] _UpperCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=feature_extractor.sampling_rate ) _UpperCAmelCase = {model_input_name: inputs.get(_SCREAMING_SNAKE_CASE )} _UpperCAmelCase = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. _UpperCAmelCase = raw_datasets['''train'''].features[data_args.label_column_name].names _UpperCAmelCase , _UpperCAmelCase = {}, {} for i, label in enumerate(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = str(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = label # Load the accuracy metric from the datasets package _UpperCAmelCase = evaluate.load('''accuracy''' ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. def compute_metrics(_SCREAMING_SNAKE_CASE : List[str] ): _UpperCAmelCase = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=_SCREAMING_SNAKE_CASE , references=eval_pred.label_ids ) _UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(_SCREAMING_SNAKE_CASE ) , labelaid=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _UpperCAmelCase = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: _UpperCAmelCase = ( raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(_SCREAMING_SNAKE_CASE , output_all_columns=_SCREAMING_SNAKE_CASE ) if training_args.do_eval: if data_args.max_eval_samples is not None: _UpperCAmelCase = ( raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(_SCREAMING_SNAKE_CASE , output_all_columns=_SCREAMING_SNAKE_CASE ) # Initialize our trainer _UpperCAmelCase = Trainer( model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , ) # Training if training_args.do_train: _UpperCAmelCase = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase = last_checkpoint _UpperCAmelCase = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _UpperCAmelCase = trainer.evaluate() trainer.log_metrics('''eval''' , _SCREAMING_SNAKE_CASE ) trainer.save_metrics('''eval''' , _SCREAMING_SNAKE_CASE ) # Write model card and (optionally) push to hub _UpperCAmelCase = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''audio-classification''', '''dataset''': data_args.dataset_name, '''tags''': ['''audio-classification'''], } if training_args.push_to_hub: trainer.push_to_hub(**_SCREAMING_SNAKE_CASE ) else: trainer.create_model_card(**_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
260
0