| Column | Type | Values |
| --- | --- | --- |
| code | string | lengths 82 to 54.1k |
| code_codestyle | int64 | 0 to 699 |
| style_context | string | lengths 111 to 35.6k |
| style_context_codestyle | int64 | 0 to 699 |
| label | int64 | 0 to 1 |
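A minimal sketch of how rows with this schema could be loaded and inspected with the `datasets` library. The repository id is a placeholder, and the reading of `label` as a style-match flag is an assumption, not something the rows below confirm.

```python
# Minimal sketch, assuming the table above describes a Hugging Face dataset.
# The repository id is hypothetical; replace it with the actual dataset path.
from datasets import load_dataset

ds = load_dataset("your-namespace/code-style-pairs", split="train")  # hypothetical id

row = ds[0]
print(row["code"][:200])               # source snippet (string, 82 to 54.1k chars)
print(row["code_codestyle"])           # integer style id in [0, 699]
print(row["style_context"][:200])      # reference snippet (string, 111 to 35.6k chars)
print(row["style_context_codestyle"])  # integer style id in [0, 699]
print(row["label"])                    # 0 or 1 (assumption: 1 when the two snippets share a style)
```

The sample rows below follow the same column order as the table above.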
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""", # See all YOLOS models at https://huggingface.co/models?filter=yolos } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''yolos''' def __init__( self : int , _A : Optional[Any]=768 , _A : Tuple=12 , _A : Any=12 , _A : Any=3072 , _A : Dict="gelu" , _A : Optional[Any]=0.0 , _A : List[str]=0.0 , _A : Optional[Any]=0.02 , _A : Union[str, Any]=1e-12 , _A : int=[512, 864] , _A : List[Any]=16 , _A : Tuple=3 , _A : str=True , _A : Optional[Any]=100 , _A : Optional[Any]=True , _A : Optional[Any]=False , _A : int=1 , _A : Any=5 , _A : Dict=2 , _A : int=5 , _A : List[Any]=2 , _A : Optional[Any]=0.1 , **_A : Optional[int] , ): """simple docstring""" super().__init__(**_A ) __SCREAMING_SNAKE_CASE : int = hidden_size __SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers __SCREAMING_SNAKE_CASE : List[str] = num_attention_heads __SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size __SCREAMING_SNAKE_CASE : Dict = hidden_act __SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob __SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : Any = initializer_range __SCREAMING_SNAKE_CASE : str = layer_norm_eps __SCREAMING_SNAKE_CASE : str = image_size __SCREAMING_SNAKE_CASE : str = patch_size __SCREAMING_SNAKE_CASE : List[str] = num_channels __SCREAMING_SNAKE_CASE : Any = qkv_bias __SCREAMING_SNAKE_CASE : Tuple = num_detection_tokens __SCREAMING_SNAKE_CASE : Any = use_mid_position_embeddings __SCREAMING_SNAKE_CASE : Optional[int] = auxiliary_loss # Hungarian matcher __SCREAMING_SNAKE_CASE : str = class_cost __SCREAMING_SNAKE_CASE : str = bbox_cost __SCREAMING_SNAKE_CASE : Tuple = giou_cost # Loss coefficients __SCREAMING_SNAKE_CASE : Optional[int] = bbox_loss_coefficient __SCREAMING_SNAKE_CASE : Optional[Any] = giou_loss_coefficient __SCREAMING_SNAKE_CASE : str = eos_coefficient class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = version.parse('''1.11''' ) @property def UpperCAmelCase__ ( self : Any ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def UpperCAmelCase__ ( self : str ): """simple docstring""" return 1e-4 @property def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return 12
code_codestyle: 74
from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = 42 class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" @register_to_config def __init__( self : Dict , _A : int = 16 , _A : int = 88 , _A : Optional[int] = None , _A : Optional[int] = None , _A : int = 1 , _A : float = 0.0 , _A : int = 32 , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : str = "geglu" , _A : bool = True , _A : bool = True , ): """simple docstring""" super().__init__() __SCREAMING_SNAKE_CASE : Dict = num_attention_heads __SCREAMING_SNAKE_CASE : Optional[int] = attention_head_dim __SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads * attention_head_dim __SCREAMING_SNAKE_CASE : Tuple = in_channels __SCREAMING_SNAKE_CASE : str = torch.nn.GroupNorm(num_groups=_A , num_channels=_A , eps=1e-6 , affine=_A ) __SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(_A , _A ) # 3. Define transformers blocks __SCREAMING_SNAKE_CASE : List[Any] = nn.ModuleList( [ BasicTransformerBlock( _A , _A , _A , dropout=_A , cross_attention_dim=_A , activation_fn=_A , attention_bias=_A , double_self_attention=_A , norm_elementwise_affine=_A , ) for d in range(_A ) ] ) __SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(_A , _A ) def UpperCAmelCase__ ( self : str , _A : Dict , _A : int=None , _A : Tuple=None , _A : Dict=None , _A : List[Any]=1 , _A : Union[str, Any]=None , _A : bool = True , ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = hidden_states.shape __SCREAMING_SNAKE_CASE : Any = batch_frames // num_frames __SCREAMING_SNAKE_CASE : Dict = hidden_states __SCREAMING_SNAKE_CASE : str = hidden_states[None, :].reshape(_A , _A , _A , _A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = hidden_states.permute(0 , 2 , 1 , 3 , 4 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self.norm(_A ) __SCREAMING_SNAKE_CASE : List[str] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , _A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = self.proj_in(_A ) # 2. Blocks for block in self.transformer_blocks: __SCREAMING_SNAKE_CASE : Optional[Any] = block( _A , encoder_hidden_states=_A , timestep=_A , cross_attention_kwargs=_A , class_labels=_A , ) # 3. Output __SCREAMING_SNAKE_CASE : Any = self.proj_out(_A ) __SCREAMING_SNAKE_CASE : List[str] = ( hidden_states[None, None, :] .reshape(_A , _A , _A , _A , _A ) .permute(0 , 3 , 4 , 1 , 2 ) .contiguous() ) __SCREAMING_SNAKE_CASE : Optional[Any] = hidden_states.reshape(_A , _A , _A , _A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=_A )
style_context_codestyle: 74
label: 1
def a__ ( snake_case , snake_case ): """simple docstring""" if len(snake_case ) != len(snake_case ): raise ValueError('''String lengths must match!''' ) __SCREAMING_SNAKE_CASE : Dict = 0 for chara, chara in zip(snake_case , snake_case ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 74
import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py lowercase_ = """src/diffusers""" lowercase_ = """.""" # This is to make sure the diffusers module imported is the one in the repo. lowercase_ = importlib.util.spec_from_file_location( """diffusers""", os.path.join(DIFFUSERS_PATH, """__init__.py"""), submodule_search_locations=[DIFFUSERS_PATH], ) lowercase_ = spec.loader.load_module() def a__ ( snake_case , snake_case ): """simple docstring""" return line.startswith(snake_case ) or len(snake_case ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , snake_case ) is not None def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = object_name.split('''.''' ) __SCREAMING_SNAKE_CASE : str = 0 # First let's find the module where our object lives. __SCREAMING_SNAKE_CASE : Any = parts[i] while i < len(snake_case ) and not os.path.isfile(os.path.join(snake_case , F'''{module}.py''' ) ): i += 1 if i < len(snake_case ): __SCREAMING_SNAKE_CASE : str = os.path.join(snake_case , parts[i] ) if i >= len(snake_case ): raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' ) with open(os.path.join(snake_case , F'''{module}.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : Dict = f.readlines() # Now let's find the class / func in the code! __SCREAMING_SNAKE_CASE : Union[str, Any] = '''''' __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for name in parts[i + 1 :]: while ( line_index < len(snake_case ) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(snake_case ): raise ValueError(F''' {object_name} does not match any function or class in {module}.''' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). __SCREAMING_SNAKE_CASE : List[Any] = line_index while line_index < len(snake_case ) and _should_continue(lines[line_index] , snake_case ): line_index += 1 # Clean up empty lines at the end (if any). 
while len(lines[line_index - 1] ) <= 1: line_index -= 1 __SCREAMING_SNAKE_CASE : Dict = lines[start_index:line_index] return "".join(snake_case ) lowercase_ = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""") lowercase_ = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""") lowercase_ = re.compile(R"""<FILL\s+[^>]*>""") def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = code.split('''\n''' ) __SCREAMING_SNAKE_CASE : Dict = 0 while idx < len(snake_case ) and len(lines[idx] ) == 0: idx += 1 if idx < len(snake_case ): return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0] return "" def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = len(get_indent(snake_case ) ) > 0 if has_indent: __SCREAMING_SNAKE_CASE : List[Any] = F'''class Bla:\n{code}''' __SCREAMING_SNAKE_CASE : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=snake_case ) __SCREAMING_SNAKE_CASE : Optional[int] = black.format_str(snake_case , mode=snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = style_docstrings_in_code(snake_case ) return result[len('''class Bla:\n''' ) :] if has_indent else result def a__ ( snake_case , snake_case=False ): """simple docstring""" with open(snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : List[str] = f.readlines() __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : int = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(snake_case ): __SCREAMING_SNAKE_CASE : Dict = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = search.groups() __SCREAMING_SNAKE_CASE : int = find_code_in_diffusers(snake_case ) __SCREAMING_SNAKE_CASE : str = get_indent(snake_case ) __SCREAMING_SNAKE_CASE : Any = line_index + 1 if indent == theoretical_indent else line_index + 2 __SCREAMING_SNAKE_CASE : Dict = theoretical_indent __SCREAMING_SNAKE_CASE : Optional[int] = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. __SCREAMING_SNAKE_CASE : List[Any] = True while line_index < len(snake_case ) and should_continue: line_index += 1 if line_index >= len(snake_case ): break __SCREAMING_SNAKE_CASE : Any = lines[line_index] __SCREAMING_SNAKE_CASE : Optional[Any] = _should_continue(snake_case , snake_case ) and re.search(F'''^{indent}# End copy''' , snake_case ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 __SCREAMING_SNAKE_CASE : List[str] = lines[start_index:line_index] __SCREAMING_SNAKE_CASE : Dict = ''''''.join(snake_case ) # Remove any nested `Copied from` comments to avoid circular copies __SCREAMING_SNAKE_CASE : Tuple = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(snake_case ) is None] __SCREAMING_SNAKE_CASE : Union[str, Any] = '''\n'''.join(snake_case ) # Before comparing, use the `replace_pattern` on the original code. 
if len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : Union[str, Any] = replace_pattern.replace('''with''' , '''''' ).split(''',''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = [_re_replace_pattern.search(snake_case ) for p in patterns] for pattern in patterns: if pattern is None: continue __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = pattern.groups() __SCREAMING_SNAKE_CASE : str = re.sub(snake_case , snake_case , snake_case ) if option.strip() == "all-casing": __SCREAMING_SNAKE_CASE : Optional[Any] = re.sub(obja.lower() , obja.lower() , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = re.sub(obja.upper() , obja.upper() , snake_case ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line __SCREAMING_SNAKE_CASE : Optional[Any] = blackify(lines[start_index - 1] + theoretical_code ) __SCREAMING_SNAKE_CASE : int = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: __SCREAMING_SNAKE_CASE : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:] __SCREAMING_SNAKE_CASE : str = start_index + 1 if overwrite and len(snake_case ) > 0: # Warn the user a file has been modified. print(F'''Detected changes, rewriting {filename}.''' ) with open(snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(snake_case ) return diffs def a__ ( snake_case = False ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = glob.glob(os.path.join(snake_case , '''**/*.py''' ) , recursive=snake_case ) __SCREAMING_SNAKE_CASE : Tuple = [] for filename in all_files: __SCREAMING_SNAKE_CASE : int = is_copy_consistent(snake_case , snake_case ) diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs] if not overwrite and len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : Optional[int] = '''\n'''.join(snake_case ) raise Exception( '''Found the following copy inconsistencies:\n''' + diff + '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") lowercase_ = parser.parse_args() check_copies(args.fix_and_overwrite)
style_context_codestyle: 74
label: 1
import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup lowercase_ = logging.get_logger(__name__) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Optional[Any] , **_A : Dict ): """simple docstring""" requires_backends(self , ['''bs4'''] ) super().__init__(**_A ) def UpperCAmelCase__ ( self : Optional[int] , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = [] __SCREAMING_SNAKE_CASE : Any = [] __SCREAMING_SNAKE_CASE : Union[str, Any] = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag __SCREAMING_SNAKE_CASE : Optional[int] = parent.find_all(child.name , recursive=_A ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(_A ) else next(i for i, s in enumerate(_A , 1 ) if s is child ) ) __SCREAMING_SNAKE_CASE : Any = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def UpperCAmelCase__ ( self : Dict , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BeautifulSoup(_A , '''html.parser''' ) __SCREAMING_SNAKE_CASE : str = [] __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : int = [] for element in html_code.descendants: if type(_A ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue __SCREAMING_SNAKE_CASE : List[Any] = html.unescape(_A ).strip() if not text_in_this_tag: continue all_doc_strings.append(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = self.xpath_soup(_A ) stringaxtag_seq.append(_A ) stringaxsubs_seq.append(_A ) if len(_A ) != len(_A ): raise ValueError('''Number of doc strings and xtags does not correspond''' ) if len(_A ) != len(_A ): raise ValueError('''Number of doc strings and xsubs does not correspond''' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def UpperCAmelCase__ ( self : int , _A : Tuple , _A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = '''''' for tagname, subs in zip(_A , _A ): xpath += F'''/{tagname}''' if subs != 0: xpath += F'''[{subs}]''' return xpath def __call__( self : Optional[int] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = False # Check that strings has a valid type if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Any = True elif isinstance(_A , (list, tuple) ): if len(_A ) == 0 or isinstance(html_strings[0] , _A ): __SCREAMING_SNAKE_CASE : List[Any] = True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' F'''but is of type {type(_A )}.''' ) __SCREAMING_SNAKE_CASE : Any = bool(isinstance(_A , (list, tuple) ) and (isinstance(html_strings[0] , _A )) ) if not is_batched: __SCREAMING_SNAKE_CASE : Dict = [html_strings] # Get nodes + xpaths __SCREAMING_SNAKE_CASE : str = [] __SCREAMING_SNAKE_CASE : Tuple = [] for html_string in html_strings: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_three_from_single(_A ) nodes.append(_A ) __SCREAMING_SNAKE_CASE : Dict = [] for node, tag_list, sub_list in zip(_A , _A , _A ): __SCREAMING_SNAKE_CASE : List[Any] = self.construct_xpath(_A , _A ) xpath_strings.append(_A ) xpaths.append(_A ) # return as Dict __SCREAMING_SNAKE_CASE : Optional[int] = {'''nodes''': nodes, '''xpaths''': xpaths} 
__SCREAMING_SNAKE_CASE : List[str] = BatchFeature(data=_A , tensor_type=_A ) return encoded_inputs
code_codestyle: 74
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" super().tearDown() gc.collect() def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained( '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE : int = jax.device_count() __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt] __SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE : Tuple = replicate(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = shard(_A ) __SCREAMING_SNAKE_CASE : Dict = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE : Optional[int] = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE : str = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE : Tuple = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = '''stabilityai/stable-diffusion-2''' __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = FlaxDPMSolverMultistepScheduler.from_pretrained(_A , subfolder='''scheduler''' ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = FlaxStableDiffusionPipeline.from_pretrained( _A , scheduler=_A , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE : List[str] = scheduler_params __SCREAMING_SNAKE_CASE : Tuple = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE : List[Any] = jax.device_count() __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt] __SCREAMING_SNAKE_CASE : Any = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = replicate(_A ) __SCREAMING_SNAKE_CASE : List[str] = shard(_A ) __SCREAMING_SNAKE_CASE : int = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE : Dict = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] ) print(F'''output_slice: {output_slice}''' ) 
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
style_context_codestyle: 74
label: 1
import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase : """simple docstring""" def __init__( self : str , _A : List[Any] , _A : Union[str, Any]=13 , _A : List[Any]=7 , _A : Any=True , _A : Dict=True , _A : Union[str, Any]=False , _A : Optional[Any]=True , _A : str=99 , _A : Any=32 , _A : Optional[int]=5 , _A : Dict=4 , _A : Dict=37 , _A : Any="gelu" , _A : Union[str, Any]=0.1 , _A : Tuple=0.1 , _A : List[Any]=512 , _A : Dict=16 , _A : Any=2 , _A : List[Any]=0.02 , _A : Tuple=3 , _A : Dict=4 , _A : Tuple=None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = parent __SCREAMING_SNAKE_CASE : Optional[Any] = batch_size __SCREAMING_SNAKE_CASE : Optional[Any] = seq_length __SCREAMING_SNAKE_CASE : str = is_training __SCREAMING_SNAKE_CASE : Tuple = use_input_mask __SCREAMING_SNAKE_CASE : Optional[int] = use_token_type_ids __SCREAMING_SNAKE_CASE : Optional[Any] = use_labels __SCREAMING_SNAKE_CASE : str = vocab_size __SCREAMING_SNAKE_CASE : List[Any] = hidden_size __SCREAMING_SNAKE_CASE : str = num_hidden_layers __SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads __SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size __SCREAMING_SNAKE_CASE : str = hidden_act __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob __SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings __SCREAMING_SNAKE_CASE : Optional[int] = type_vocab_size __SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size __SCREAMING_SNAKE_CASE : List[str] = initializer_range __SCREAMING_SNAKE_CASE : str = num_labels __SCREAMING_SNAKE_CASE : str = num_choices __SCREAMING_SNAKE_CASE : List[Any] = scope def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE : Any = None if self.use_input_mask: __SCREAMING_SNAKE_CASE : str = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE : str = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE : List[Any] = None __SCREAMING_SNAKE_CASE : List[str] = None __SCREAMING_SNAKE_CASE : List[str] = None if self.use_labels: __SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE : List[str] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" return BioGptConfig( vocab_size=self.vocab_size , 
hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : str , _A : Optional[Any] , _A : Optional[int] , _A : Tuple , _A : Tuple , _A : Optional[Any] , _A : List[Any] , _A : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = BioGptModel(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : str = model(_A , attention_mask=_A ) __SCREAMING_SNAKE_CASE : List[Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Union[str, Any] , _A : List[str] , _A : Optional[Any] , _A : Any , _A : List[str] , _A : List[str] , _A : Optional[Any] , _A : Optional[Any] , _A : int , _A : Tuple , ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = BioGptForCausalLM(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Optional[Any] , _A : Any , _A : Union[str, Any] , _A : Dict , _A : List[str] , _A : Union[str, Any] , *_A : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = BioGptModel(config=_A ) model.to(_A ) model.eval() # create attention mask __SCREAMING_SNAKE_CASE : Any = torch.ones(input_ids.shape , dtype=torch.long , device=_A ) __SCREAMING_SNAKE_CASE : Any = self.seq_length // 2 __SCREAMING_SNAKE_CASE : List[Any] = 0 # first forward pass __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = model(_A , attention_mask=_A ).to_tuple() # create hypothetical next token and extent to next_input_ids __SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids __SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor((1,) , _A ).item() + 1 __SCREAMING_SNAKE_CASE : int = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) __SCREAMING_SNAKE_CASE : Any = random_other_next_tokens # append to next input_ids and attn_mask __SCREAMING_SNAKE_CASE : int = torch.cat([input_ids, next_tokens] , dim=-1 ) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_A )] , dim=1 , ) # get two different outputs __SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A , attention_mask=_A )['''last_hidden_state'''] __SCREAMING_SNAKE_CASE : List[str] = model(_A , past_key_values=_A , attention_mask=_A )['''last_hidden_state'''] # select random slice __SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() __SCREAMING_SNAKE_CASE : List[Any] = output_from_no_past[:, -1, random_slice_idx].detach() __SCREAMING_SNAKE_CASE : Tuple = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_A , _A , atol=1e-3 ) ) def UpperCAmelCase__ ( self : Dict , _A : List[str] , _A : List[Any] , _A : List[str] , _A : Dict , _A : int , *_A : Dict ): """simple docstring""" 
__SCREAMING_SNAKE_CASE : Optional[int] = BioGptModel(config=_A ).to(_A ).eval() __SCREAMING_SNAKE_CASE : str = torch.ones(input_ids.shape , dtype=torch.long , device=_A ) # first forward pass __SCREAMING_SNAKE_CASE : List[Any] = model(_A , attention_mask=_A , use_cache=_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids __SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size ) __SCREAMING_SNAKE_CASE : Tuple = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and __SCREAMING_SNAKE_CASE : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) __SCREAMING_SNAKE_CASE : Any = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) __SCREAMING_SNAKE_CASE : Optional[Any] = model(_A , attention_mask=_A )['''last_hidden_state'''] __SCREAMING_SNAKE_CASE : str = model(_A , attention_mask=_A , past_key_values=_A )[ '''last_hidden_state''' ] # select random slice __SCREAMING_SNAKE_CASE : str = ids_tensor((1,) , output_from_past.shape[-1] ).item() __SCREAMING_SNAKE_CASE : Dict = output_from_no_past[:, -3:, random_slice_idx].detach() __SCREAMING_SNAKE_CASE : Dict = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_A , _A , atol=1e-3 ) ) def UpperCAmelCase__ ( self : Any , _A : List[str] , _A : str , _A : Dict , _A : List[str] , _A : int , *_A : Optional[Any] , _A : str=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = BioGptForCausalLM(_A ) model.to(_A ) if gradient_checkpointing: model.gradient_checkpointing_enable() __SCREAMING_SNAKE_CASE : Dict = model(_A , labels=_A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def UpperCAmelCase__ ( self : List[Any] , _A : str , *_A : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = BioGptModel(_A ) __SCREAMING_SNAKE_CASE : int = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def UpperCAmelCase__ ( self : str , _A : Union[str, Any] , _A : str , _A : Dict , _A : List[str] , _A : Dict , *_A : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels __SCREAMING_SNAKE_CASE : Any = BioGptForTokenClassification(_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Any = model(_A , attention_mask=_A , token_type_ids=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ), ( __SCREAMING_SNAKE_CASE ), ( __SCREAMING_SNAKE_CASE ), ( __SCREAMING_SNAKE_CASE ), ( __SCREAMING_SNAKE_CASE ), ( __SCREAMING_SNAKE_CASE ), ( __SCREAMING_SNAKE_CASE ), ) : List[str] = config_and_inputs __SCREAMING_SNAKE_CASE : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , 
lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) lowerCAmelCase_ = (BioGptForCausalLM,) if is_torch_available() else () lowerCAmelCase_ = ( { '''feature-extraction''': BioGptModel, '''text-classification''': BioGptForSequenceClassification, '''text-generation''': BioGptForCausalLM, '''token-classification''': BioGptForTokenClassification, '''zero-shot''': BioGptForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase_ = False def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = BioGptModelTester(self ) __SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=_A , hidden_size=37 ) def UpperCAmelCase__ ( self : int ): """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __SCREAMING_SNAKE_CASE : Any = type self.model_tester.create_and_check_model(*_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_A ) def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*_A , gradient_checkpointing=_A ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_A ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*_A ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*_A ) @slow def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) model.to(_A ) __SCREAMING_SNAKE_CASE : List[Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) __SCREAMING_SNAKE_CASE : List[str] = '''left''' # Define PAD Token = EOS Token = 50256 __SCREAMING_SNAKE_CASE : Dict = tokenizer.eos_token __SCREAMING_SNAKE_CASE : Optional[Any] = model.config.eos_token_id # use different length sentences to test batching __SCREAMING_SNAKE_CASE : Optional[int] = [ '''Hello, my dog is a little''', '''Today, I''', ] __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(_A , return_tensors='''pt''' , padding=_A ) __SCREAMING_SNAKE_CASE : List[Any] = inputs['''input_ids'''].to(_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = model.generate( input_ids=_A , attention_mask=inputs['''attention_mask'''].to(_A ) , ) __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(_A ) 
__SCREAMING_SNAKE_CASE : int = model.generate(input_ids=_A ) __SCREAMING_SNAKE_CASE : Tuple = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item() __SCREAMING_SNAKE_CASE : Any = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(_A ) __SCREAMING_SNAKE_CASE : Dict = model.generate(input_ids=_A , max_length=model.config.max_length - num_paddings ) __SCREAMING_SNAKE_CASE : List[Any] = tokenizer.batch_decode(_A , skip_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Any = [ '''Hello, my dog is a little bit bigger than a little bit.''', '''Today, I have a good idea of how to use the information''', ] self.assertListEqual(_A , _A ) self.assertListEqual(_A , [non_padded_sentence, padded_sentence] ) @slow def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE : List[Any] = BioGptModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE : Optional[Any] = 3 __SCREAMING_SNAKE_CASE : str = input_dict['''input_ids'''] __SCREAMING_SNAKE_CASE : List[Any] = input_ids.ne(1 ).to(_A ) __SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __SCREAMING_SNAKE_CASE : Union[str, Any] = BioGptForSequenceClassification(_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE : List[Any] = 3 __SCREAMING_SNAKE_CASE : Optional[int] = '''multi_label_classification''' __SCREAMING_SNAKE_CASE : List[Any] = input_dict['''input_ids'''] __SCREAMING_SNAKE_CASE : Optional[int] = input_ids.ne(1 ).to(_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __SCREAMING_SNAKE_CASE : Optional[int] = BioGptForSequenceClassification(_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : List[str] = model(_A , attention_mask=_A , labels=_A ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[2, 4805, 9, 656, 21]] ) __SCREAMING_SNAKE_CASE : Tuple = model(_A )[0] __SCREAMING_SNAKE_CASE : int = 4_2384 __SCREAMING_SNAKE_CASE : List[Any] = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , _A ) __SCREAMING_SNAKE_CASE : List[str] = torch.tensor( [[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _A , atol=1e-4 ) ) @slow 
def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) __SCREAMING_SNAKE_CASE : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) model.to(_A ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = model.generate( **_A , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=_A , ) __SCREAMING_SNAKE_CASE : int = tokenizer.decode(output_ids[0] , skip_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = ( '''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the''' ''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and''' ''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),''' ''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and''' ''' more than 800,000 deaths.''' ) self.assertEqual(_A , _A )
code_codestyle: 74
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) lowercase_ = { """configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""], """processing_layoutlmv2""": ["""LayoutLMv2Processor"""], """tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""LayoutLMv2TokenizerFast"""] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""LayoutLMv2FeatureExtractor"""] lowercase_ = ["""LayoutLMv2ImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""", """LayoutLMv2ForQuestionAnswering""", """LayoutLMv2ForSequenceClassification""", """LayoutLMv2ForTokenClassification""", """LayoutLMv2Layer""", """LayoutLMv2Model""", """LayoutLMv2PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
style_context_codestyle: 74
label: 1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowercase_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""EncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""TFEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""FlaxEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
code_codestyle: 74
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = MobileBertTokenizer lowerCAmelCase_ = MobileBertTokenizerFast lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = filter_non_english lowerCAmelCase_ = '''google/mobilebert-uncased''' def UpperCAmelCase__ ( self : Dict ): """simple docstring""" super().setUp() __SCREAMING_SNAKE_CASE : List[str] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __SCREAMING_SNAKE_CASE : int = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : List[str] = '''unwanted, running''' return input_text, output_text def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class(self.vocab_file ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(_A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] ) def UpperCAmelCase__ ( self : int ): """simple docstring""" if not self.test_rust_tokenizer: return __SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : str = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : str = tokenizer.encode(_A ) __SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) # With lower casing __SCREAMING_SNAKE_CASE : Any = self.get_tokenizer(do_lower_case=_A ) __SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer(do_lower_case=_A ) __SCREAMING_SNAKE_CASE : List[str] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) 
__SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
[UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] __SCREAMING_SNAKE_CASE : Dict = {} for i, token in enumerate(_A ): __SCREAMING_SNAKE_CASE : List[str] = i __SCREAMING_SNAKE_CASE : str = WordpieceTokenizer(vocab=_A , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def UpperCAmelCase__ ( self : str ): """simple docstring""" self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) self.assertListEqual( [rust_tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) @slow def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : str = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r.encode_plus( _A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , 
add_special_tokens=_A , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.do_lower_case if hasattr(_A , '''do_lower_case''' ) else False __SCREAMING_SNAKE_CASE : Optional[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''A'''), ((1, 2), ''','''), ((3, 5), '''na'''), ((5, 6), '''##ï'''), ((6, 8), '''##ve'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''Allen'''), ((21, 23), '''##NL'''), ((23, 24), '''##P'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''a'''), ((1, 2), ''','''), ((3, 8), '''naive'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''allen'''), ((21, 23), '''##nl'''), ((23, 24), '''##p'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = ['''的''', '''人''', '''有'''] __SCREAMING_SNAKE_CASE : int = ''''''.join(_A ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __SCREAMING_SNAKE_CASE : str = True __SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.convert_ids_to_tokens(_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_A , _A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Optional[Any] = False __SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A ) # it is expected that only the first Chinese character is not preceded by "##". __SCREAMING_SNAKE_CASE : List[Any] = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(_A ) ] self.assertListEqual(_A , _A ) self.assertListEqual(_A , _A )
74
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowercase_ = { """configuration_efficientformer""": [ """EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """EfficientFormerConfig""", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""EfficientFormerImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """EfficientFormerForImageClassification""", """EfficientFormerForImageClassificationWithTeacher""", """EfficientFormerModel""", """EfficientFormerPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFEfficientFormerForImageClassification""", """TFEfficientFormerForImageClassificationWithTeacher""", """TFEfficientFormerModel""", """TFEfficientFormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


lowercase_ = logging.get_logger(__name__)


class __UpperCamelCase ( lowerCAmelCase__ ):
    """simple docstring"""

    def __init__( self : Tuple , *_A : Optional[int] , **_A : Tuple ):
        """simple docstring"""
        warnings.warn(
            '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use MobileViTImageProcessor instead.''' , _A , )
        super().__init__(*_A , **_A )
74
1
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=lowerCAmelCase__ )
class __UpperCamelCase ( lowerCAmelCase__ ):
    """simple docstring"""

    lowerCAmelCase_ = field(default='''image-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    lowerCAmelCase_ = Features({'''image''': Image()} )
    lowerCAmelCase_ = Features({'''labels''': ClassLabel} )
    lowerCAmelCase_ = "image"
    lowerCAmelCase_ = "labels"

    def UpperCAmelCase__ ( self : Optional[Any] , _A : Optional[Any] ):
        """simple docstring"""
        if self.label_column not in features:
            raise ValueError(F'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , _A ):
            raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
        __SCREAMING_SNAKE_CASE : Any = copy.deepcopy(self )
        __SCREAMING_SNAKE_CASE : Optional[int] = self.label_schema.copy()
        __SCREAMING_SNAKE_CASE : Union[str, Any] = features[self.label_column]
        __SCREAMING_SNAKE_CASE : List[Any] = label_schema
        return task_template

    @property
    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """simple docstring"""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
74
import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast lowercase_ = datasets.utils.logging.get_logger(__name__) @dataclass class __UpperCamelCase ( datasets.BuilderConfig ): """simple docstring""" lowerCAmelCase_ = 1_00_00 lowerCAmelCase_ = None lowerCAmelCase_ = None class __UpperCamelCase ( datasets.ArrowBasedBuilder ): """simple docstring""" lowerCAmelCase_ = ParquetConfig def UpperCAmelCase__ ( self : Any ): """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def UpperCAmelCase__ ( self : Any , _A : Optional[Any] ): """simple docstring""" if not self.config.data_files: raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) __SCREAMING_SNAKE_CASE : List[str] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_A , (str, list, tuple) ): __SCREAMING_SNAKE_CASE : Tuple = data_files if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Optional[int] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE : List[Any] = [dl_manager.iter_files(_A ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] __SCREAMING_SNAKE_CASE : int = [] for split_name, files in data_files.items(): if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Any = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE : Optional[int] = [dl_manager.iter_files(_A ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(_A ): with open(_A , '''rb''' ) as f: __SCREAMING_SNAKE_CASE : Dict = datasets.Features.from_arrow_schema(pq.read_schema(_A ) ) break splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'''files''': files} ) ) return splits def UpperCAmelCase__ ( self : str , _A : pa.Table ): """simple docstring""" if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example __SCREAMING_SNAKE_CASE : str = table_cast(_A , self.info.features.arrow_schema ) return pa_table def UpperCAmelCase__ ( self : Tuple , _A : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' ) for file_idx, file in enumerate(itertools.chain.from_iterable(_A ) ): with open(_A , '''rb''' ) as f: __SCREAMING_SNAKE_CASE : str = pq.ParquetFile(_A ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): __SCREAMING_SNAKE_CASE : Optional[Any] = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield F'''{file_idx}_{batch_idx}''', self._cast_table(_A ) except ValueError as 
e: logger.error(F'''Failed to read file \'{file}\' with error {type(_A )}: {e}''' ) raise
74
1
import argparse import struct import unittest class __UpperCamelCase : """simple docstring""" def __init__( self : Union[str, Any] , _A : bytes ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = data # Initialize hash values __SCREAMING_SNAKE_CASE : List[str] = [ 0x6A_09_E6_67, 0xBB_67_AE_85, 0x3C_6E_F3_72, 0xA5_4F_F5_3A, 0x51_0E_52_7F, 0x9B_05_68_8C, 0x1F_83_D9_AB, 0x5B_E0_CD_19, ] # Initialize round constants __SCREAMING_SNAKE_CASE : List[str] = [ 0x42_8A_2F_98, 0x71_37_44_91, 0xB5_C0_FB_CF, 0xE9_B5_DB_A5, 0x39_56_C2_5B, 0x59_F1_11_F1, 0x92_3F_82_A4, 0xAB_1C_5E_D5, 0xD8_07_AA_98, 0x12_83_5B_01, 0x24_31_85_BE, 0x55_0C_7D_C3, 0x72_BE_5D_74, 0x80_DE_B1_FE, 0x9B_DC_06_A7, 0xC1_9B_F1_74, 0xE4_9B_69_C1, 0xEF_BE_47_86, 0x0F_C1_9D_C6, 0x24_0C_A1_CC, 0x2D_E9_2C_6F, 0x4A_74_84_AA, 0x5C_B0_A9_DC, 0x76_F9_88_DA, 0x98_3E_51_52, 0xA8_31_C6_6D, 0xB0_03_27_C8, 0xBF_59_7F_C7, 0xC6_E0_0B_F3, 0xD5_A7_91_47, 0x06_CA_63_51, 0x14_29_29_67, 0x27_B7_0A_85, 0x2E_1B_21_38, 0x4D_2C_6D_FC, 0x53_38_0D_13, 0x65_0A_73_54, 0x76_6A_0A_BB, 0x81_C2_C9_2E, 0x92_72_2C_85, 0xA2_BF_E8_A1, 0xA8_1A_66_4B, 0xC2_4B_8B_70, 0xC7_6C_51_A3, 0xD1_92_E8_19, 0xD6_99_06_24, 0xF4_0E_35_85, 0x10_6A_A0_70, 0x19_A4_C1_16, 0x1E_37_6C_08, 0x27_48_77_4C, 0x34_B0_BC_B5, 0x39_1C_0C_B3, 0x4E_D8_AA_4A, 0x5B_9C_CA_4F, 0x68_2E_6F_F3, 0x74_8F_82_EE, 0x78_A5_63_6F, 0x84_C8_78_14, 0x8C_C7_02_08, 0x90_BE_FF_FA, 0xA4_50_6C_EB, 0xBE_F9_A3_F7, 0xC6_71_78_F2, ] __SCREAMING_SNAKE_CASE : List[Any] = self.preprocessing(self.data ) self.final_hash() @staticmethod def UpperCAmelCase__ ( _A : bytes ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = B'''\x80''' + (B'''\x00''' * (63 - (len(_A ) + 8) % 64)) __SCREAMING_SNAKE_CASE : Optional[int] = struct.pack('''>Q''' , (len(_A ) * 8) ) return data + padding + big_endian_integer def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [ self.preprocessed_data[x : x + 64] for x in range(0 , len(self.preprocessed_data ) , 64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers __SCREAMING_SNAKE_CASE : Dict = list(struct.unpack('''>16L''' , _A ) ) # add 48 0-ed integers words += [0] * 48 __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = self.hashes for index in range(0 , 64 ): if index > 15: # modify the zero-ed indexes at the end of the array __SCREAMING_SNAKE_CASE : Union[str, Any] = ( self.ror(words[index - 15] , 7 ) ^ self.ror(words[index - 15] , 18 ) ^ (words[index - 15] >> 3) ) __SCREAMING_SNAKE_CASE : Optional[int] = ( self.ror(words[index - 2] , 17 ) ^ self.ror(words[index - 2] , 19 ) ^ (words[index - 2] >> 10) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_00_00_00_00 # Compression __SCREAMING_SNAKE_CASE : Any = self.ror(_A , 6 ) ^ self.ror(_A , 11 ) ^ self.ror(_A , 25 ) __SCREAMING_SNAKE_CASE : int = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g) __SCREAMING_SNAKE_CASE : List[str] = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_00_00_00_00 __SCREAMING_SNAKE_CASE : Dict = self.ror(_A , 2 ) ^ self.ror(_A , 13 ) ^ self.ror(_A , 22 ) __SCREAMING_SNAKE_CASE : str = (a & b) ^ (a & c) ^ (b & c) __SCREAMING_SNAKE_CASE : Dict = (sa + maj) % 0x1_00_00_00_00 __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, 
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = ( g, f, e, ((d + tempa) % 0x1_00_00_00_00), c, b, a, ((tempa + tempa) % 0x1_00_00_00_00), ) __SCREAMING_SNAKE_CASE : Tuple = [a, b, c, d, e, f, g, h] # Modify final values __SCREAMING_SNAKE_CASE : List[str] = [ ((element + mutated_hash_values[index]) % 0x1_00_00_00_00) for index, element in enumerate(self.hashes ) ] __SCREAMING_SNAKE_CASE : List[Any] = ''''''.join([hex(_A )[2:].zfill(8 ) for value in self.hashes] ) def UpperCAmelCase__ ( self : List[str] , _A : int , _A : int ): """simple docstring""" return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations) class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" import hashlib __SCREAMING_SNAKE_CASE : Tuple = bytes('''Test String''' , '''utf-8''' ) self.assertEqual(SHAaaa(_A ).hash , hashlib.shaaaa(_A ).hexdigest() ) def a__ ( ): """simple docstring""" import doctest doctest.testmod() __SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser() parser.add_argument( '''-s''' , '''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , ) parser.add_argument( '''-f''' , '''--file''' , dest='''input_file''' , help='''Hash contents of a file''' ) __SCREAMING_SNAKE_CASE : int = parser.parse_args() __SCREAMING_SNAKE_CASE : Tuple = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , '''rb''' ) as f: __SCREAMING_SNAKE_CASE : Union[str, Any] = f.read() else: __SCREAMING_SNAKE_CASE : str = bytes(snake_case , '''utf-8''' ) print(SHAaaa(snake_case ).hash ) if __name__ == "__main__": main()
74
from math import isclose, sqrt


def a__ ( snake_case , snake_case , snake_case ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE : Optional[Any] = point_y / 4 / point_x
    __SCREAMING_SNAKE_CASE : int = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    __SCREAMING_SNAKE_CASE : Tuple = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    __SCREAMING_SNAKE_CASE : int = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    __SCREAMING_SNAKE_CASE : int = outgoing_gradient**2 + 4
    __SCREAMING_SNAKE_CASE : List[str] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    __SCREAMING_SNAKE_CASE : Optional[Any] = (point_y - outgoing_gradient * point_x) ** 2 - 100
    __SCREAMING_SNAKE_CASE : str = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    __SCREAMING_SNAKE_CASE : int = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    __SCREAMING_SNAKE_CASE : Dict = x_minus if isclose(snake_case , snake_case ) else x_plus
    __SCREAMING_SNAKE_CASE : Dict = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient


def a__ ( snake_case = 1.4 , snake_case = -9.6 ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE : int = 0
    __SCREAMING_SNAKE_CASE : float = first_x_coord
    __SCREAMING_SNAKE_CASE : float = first_y_coord
    __SCREAMING_SNAKE_CASE : float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = next_point(snake_case , snake_case , snake_case )
        num_reflections += 1
    return num_reflections


if __name__ == "__main__":
    print(f'''{solution() = }''')
74
1
def a__ ( snake_case ): # noqa: E741 """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = len(snake_case ) __SCREAMING_SNAKE_CASE : Any = 0 __SCREAMING_SNAKE_CASE : int = [0] * n __SCREAMING_SNAKE_CASE : Optional[Any] = [False] * n __SCREAMING_SNAKE_CASE : Any = [False] * n def dfs(snake_case , snake_case , snake_case , snake_case ): if parent == root: out_edge_count += 1 __SCREAMING_SNAKE_CASE : Union[str, Any] = True __SCREAMING_SNAKE_CASE : Optional[Any] = at for to in l[at]: if to == parent: pass elif not visited[to]: __SCREAMING_SNAKE_CASE : Optional[int] = dfs(snake_case , snake_case , snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Any = min(low[at] , low[to] ) # AP found via bridge if at < low[to]: __SCREAMING_SNAKE_CASE : Dict = True # AP found via cycle if at == low[to]: __SCREAMING_SNAKE_CASE : str = True else: __SCREAMING_SNAKE_CASE : Optional[Any] = min(low[at] , snake_case ) return out_edge_count for i in range(snake_case ): if not visited[i]: __SCREAMING_SNAKE_CASE : List[Any] = 0 __SCREAMING_SNAKE_CASE : List[str] = dfs(snake_case , snake_case , -1 , snake_case ) __SCREAMING_SNAKE_CASE : Any = out_edge_count > 1 for x in range(len(snake_case ) ): if is_art[x] is True: print(snake_case ) # Adjacency list of graph lowercase_ = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], } compute_ap(data)
74
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Any , _A : int , _A : Any=7 , _A : List[str]=3 , _A : Optional[Any]=18 , _A : List[str]=30 , _A : Optional[Any]=400 , _A : Any=True , _A : List[str]=None , _A : Union[str, Any]=True , _A : Optional[int]=None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = size if size is not None else {'''shortest_edge''': 20} __SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __SCREAMING_SNAKE_CASE : int = parent __SCREAMING_SNAKE_CASE : Optional[int] = batch_size __SCREAMING_SNAKE_CASE : Optional[Any] = num_channels __SCREAMING_SNAKE_CASE : List[str] = image_size __SCREAMING_SNAKE_CASE : int = min_resolution __SCREAMING_SNAKE_CASE : Optional[int] = max_resolution __SCREAMING_SNAKE_CASE : List[Any] = do_resize __SCREAMING_SNAKE_CASE : Union[str, Any] = size __SCREAMING_SNAKE_CASE : str = do_center_crop __SCREAMING_SNAKE_CASE : Any = crop_size def UpperCAmelCase__ ( self : Dict ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = MobileNetVaImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = MobileNetVaImageProcessingTester(self ) @property def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) self.assertTrue(hasattr(_A , '''do_center_crop''' ) ) self.assertTrue(hasattr(_A , '''crop_size''' ) ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def UpperCAmelCase__ ( self : int ): """simple docstring""" pass def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] 
, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : Dict = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
74
1
import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class __UpperCamelCase : """simple docstring""" def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : List[str] = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : str = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = DDPMScheduler( num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Optional[int] = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : str = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='''gelu''' , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = DDPMScheduler( num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , 
variance_type='''learned_range''' , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : int = DDPMScheduler( num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : int = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components() __SCREAMING_SNAKE_CASE : str = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) __SCREAMING_SNAKE_CASE : int = self.get_dummy_inputs(_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = inputs['''prompt'''] __SCREAMING_SNAKE_CASE : Dict = inputs['''generator'''] __SCREAMING_SNAKE_CASE : str = inputs['''num_inference_steps'''] __SCREAMING_SNAKE_CASE : List[str] = inputs['''output_type'''] if "image" in inputs: __SCREAMING_SNAKE_CASE : Any = inputs['''image'''] else: __SCREAMING_SNAKE_CASE : Optional[int] = None if "mask_image" in inputs: __SCREAMING_SNAKE_CASE : List[str] = inputs['''mask_image'''] else: __SCREAMING_SNAKE_CASE : List[Any] = None if "original_image" in inputs: __SCREAMING_SNAKE_CASE : Optional[int] = inputs['''original_image'''] else: __SCREAMING_SNAKE_CASE : str = None __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = pipe.encode_prompt(_A ) # inputs with prompt converted to embeddings __SCREAMING_SNAKE_CASE : List[Any] = { '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''': num_inference_steps, '''output_type''': output_type, } if image is not None: __SCREAMING_SNAKE_CASE : Optional[int] = image if mask_image is not None: __SCREAMING_SNAKE_CASE : Any = mask_image if original_image is not None: __SCREAMING_SNAKE_CASE : Optional[Any] = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(_A , _A , _A ) __SCREAMING_SNAKE_CASE : Any = pipe(**_A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_A ) __SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class.from_pretrained(_A ) pipe_loaded.to(_A ) pipe_loaded.set_progress_bar_config(disable=_A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(_A , _A ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , ) __SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(_A ) __SCREAMING_SNAKE_CASE : List[str] = inputs['''generator'''] __SCREAMING_SNAKE_CASE : Tuple = inputs['''num_inference_steps'''] __SCREAMING_SNAKE_CASE : Optional[Any] = inputs['''output_type'''] # inputs with prompt converted to embeddings __SCREAMING_SNAKE_CASE : List[str] = { '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''': num_inference_steps, '''output_type''': output_type, } if image is not None: __SCREAMING_SNAKE_CASE : Optional[int] = image if mask_image is not None: __SCREAMING_SNAKE_CASE : Optional[int] = mask_image if original_image is not None: __SCREAMING_SNAKE_CASE : str = original_image __SCREAMING_SNAKE_CASE : Tuple = pipe_loaded(**_A )[0] __SCREAMING_SNAKE_CASE : Optional[int] = 
np.abs(to_np(_A ) - to_np(_A ) ).max() self.assertLess(_A , 1e-4 ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components() __SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) __SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_inputs(_A ) __SCREAMING_SNAKE_CASE : int = pipe(**_A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_A ) __SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class.from_pretrained(_A ) pipe_loaded.to(_A ) pipe_loaded.set_progress_bar_config(disable=_A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests __SCREAMING_SNAKE_CASE : str = self.get_dummy_inputs(_A ) __SCREAMING_SNAKE_CASE : List[Any] = pipe_loaded(**_A )[0] __SCREAMING_SNAKE_CASE : Any = np.abs(to_np(_A ) - to_np(_A ) ).max() self.assertLess(_A , 1e-4 )
74
def a__ ( snake_case ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE : List[str] = [0 for i in range(len(snake_case ) )]

    # initialize interval's left pointer and right pointer
    __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = 0, 0

    for i in range(1 , len(snake_case ) ):
        # case when current index is inside the interval
        if i <= right_pointer:
            __SCREAMING_SNAKE_CASE : List[Any] = min(right_pointer - i + 1 , z_result[i - left_pointer] )
            __SCREAMING_SNAKE_CASE : Dict = min_edge

        while go_next(snake_case , snake_case , snake_case ):
            z_result[i] += 1

        # if the new index's result gives us a larger right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = i, i + z_result[i] - 1

    return z_result


def a__ ( snake_case , snake_case , snake_case ):
    """simple docstring"""
    return i + z_result[i] < len(snake_case ) and s[z_result[i]] == s[i + z_result[i]]


def a__ ( snake_case , snake_case ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE : str = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    __SCREAMING_SNAKE_CASE : str = z_function(pattern + input_str )

    for val in z_result:
        # if the value is greater than the length of the pattern string,
        # this index is the starting position of a substring
        # that is equal to the pattern string
        if val >= len(snake_case ):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
74
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


lowercase_ = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = ["""GPTSw3Tokenizer"""]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_swa import GPTSwaTokenizer
else:
    import sys

    lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowercase_ = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""", """SwinForImageClassification""", """SwinForMaskedImageModeling""", """SwinModel""", """SwinPreTrainedModel""", """SwinBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFSwinForImageClassification""", """TFSwinForMaskedImageModeling""", """TFSwinModel""", """TFSwinPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swin import ( SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel, SwinPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
1
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name lowercase_ = """ Examples: ```py >>> from PIL import Image >>> import torch >>> from diffusers import DiffusionPipeline >>> from diffusers.utils import export_to_gif, load_image >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") >>> repo = \"openai/shap-e-img2img\" >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16) >>> pipe = pipe.to(device) >>> guidance_scale = 3.0 >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\" >>> image = load_image(image_url).convert(\"RGB\") >>> images = pipe( ... image, ... guidance_scale=guidance_scale, ... num_inference_steps=64, ... frame_size=256, ... ).images >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\") ``` """ @dataclass class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = 42 class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Dict , _A : PriorTransformer , _A : CLIPVisionModel , _A : CLIPImageProcessor , _A : HeunDiscreteScheduler , _A : ShapERenderer , ): """simple docstring""" super().__init__() self.register_modules( prior=_A , image_encoder=_A , image_processor=_A , scheduler=_A , renderer=_A , ) def UpperCAmelCase__ ( self : Tuple , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[int] , _A : Union[str, Any] , _A : Dict ): """simple docstring""" if latents is None: __SCREAMING_SNAKE_CASE : Tuple = randn_tensor(_A , generator=_A , device=_A , dtype=_A ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) __SCREAMING_SNAKE_CASE : List[str] = latents.to(_A ) __SCREAMING_SNAKE_CASE : List[str] = latents * scheduler.init_noise_sigma return latents def UpperCAmelCase__ ( self : Union[str, Any] , _A : List[Any]=0 ): """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) __SCREAMING_SNAKE_CASE : str = torch.device(F'''cuda:{gpu_id}''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_A , _A ) @property def UpperCAmelCase__ ( self : Dict ): """simple docstring""" if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ): return self.device for module in self.image_encoder.modules(): if ( hasattr(_A , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def UpperCAmelCase__ ( self : Optional[Any] , _A : Union[str, Any] , _A : Any , _A : Union[str, Any] , _A : List[str] , ): """simple docstring""" if isinstance(_A , _A ) and isinstance(image[0] , torch.Tensor ): __SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(_A , axis=0 ) if image[0].ndim == 4 else 
torch.stack(_A , axis=0 ) if not isinstance(_A , torch.Tensor ): __SCREAMING_SNAKE_CASE : Any = self.image_processor(_A , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 ) __SCREAMING_SNAKE_CASE : List[Any] = image.to(dtype=self.image_encoder.dtype , device=_A ) __SCREAMING_SNAKE_CASE : Dict = self.image_encoder(_A )['''last_hidden_state'''] __SCREAMING_SNAKE_CASE : Optional[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 __SCREAMING_SNAKE_CASE : Optional[Any] = image_embeds.repeat_interleave(_A , dim=0 ) if do_classifier_free_guidance: __SCREAMING_SNAKE_CASE : int = torch.zeros_like(_A ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __SCREAMING_SNAKE_CASE : str = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(_A ) def __call__( self : List[str] , _A : Union[PIL.Image.Image, List[PIL.Image.Image]] , _A : int = 1 , _A : int = 25 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[torch.FloatTensor] = None , _A : float = 4.0 , _A : int = 64 , _A : Optional[str] = "pil" , _A : bool = True , ): """simple docstring""" if isinstance(_A , PIL.Image.Image ): __SCREAMING_SNAKE_CASE : str = 1 elif isinstance(_A , torch.Tensor ): __SCREAMING_SNAKE_CASE : int = image.shape[0] elif isinstance(_A , _A ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): __SCREAMING_SNAKE_CASE : List[Any] = len(_A ) else: raise ValueError( F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_A )}''' ) __SCREAMING_SNAKE_CASE : List[str] = self._execution_device __SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size * num_images_per_prompt __SCREAMING_SNAKE_CASE : str = guidance_scale > 1.0 __SCREAMING_SNAKE_CASE : Dict = self._encode_image(_A , _A , _A , _A ) # prior self.scheduler.set_timesteps(_A , device=_A ) __SCREAMING_SNAKE_CASE : int = self.scheduler.timesteps __SCREAMING_SNAKE_CASE : Dict = self.prior.config.num_embeddings __SCREAMING_SNAKE_CASE : int = self.prior.config.embedding_dim __SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _A , _A , _A , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim __SCREAMING_SNAKE_CASE : Tuple = latents.reshape(latents.shape[0] , _A , _A ) for i, t in enumerate(self.progress_bar(_A ) ): # expand the latents if we are doing classifier free guidance __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.scale_model_input(_A , _A ) __SCREAMING_SNAKE_CASE : Optional[int] = self.prior( _A , timestep=_A , proj_embedding=_A , ).predicted_image_embedding # remove the variance __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = noise_pred.chunk(2 ) __SCREAMING_SNAKE_CASE : str = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) __SCREAMING_SNAKE_CASE : str = self.scheduler.step( _A , timestep=_A , sample=_A , ).prev_sample if 
output_type == "latent": return ShapEPipelineOutput(images=_A ) __SCREAMING_SNAKE_CASE : str = [] for i, latent in enumerate(_A ): print() __SCREAMING_SNAKE_CASE : Dict = self.renderer.decode( latent[None, :] , _A , size=_A , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , ) images.append(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.stack(_A ) if output_type not in ["np", "pil"]: raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = images.cpu().numpy() if output_type == "pil": __SCREAMING_SNAKE_CASE : Optional[int] = [self.numpy_to_pil(_A ) for image in images] # Offload last model to CPU if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=_A )
74
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = XCLIPTextConfig() # derive patch size from model name __SCREAMING_SNAKE_CASE : Tuple = model_name.find('''patch''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPVisionConfig(patch_size=snake_case , num_frames=snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 __SCREAMING_SNAKE_CASE : Optional[Any] = 12 __SCREAMING_SNAKE_CASE : Optional[Any] = 1_024 __SCREAMING_SNAKE_CASE : int = 4_096 __SCREAMING_SNAKE_CASE : Tuple = 16 __SCREAMING_SNAKE_CASE : Optional[int] = 24 __SCREAMING_SNAKE_CASE : Optional[int] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 if model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Any = 336 __SCREAMING_SNAKE_CASE : Any = XCLIPConfig.from_text_vision_configs(snake_case , snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Any = 768 return config def a__ ( snake_case ): """simple docstring""" # text encoder if name == "token_embedding.weight": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' ) if name == "positional_embedding": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' ) if "ln_1" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''ln_1''' , '''layer_norm1''' ) if "ln_2" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''ln_2''' , '''layer_norm2''' ) if "c_fc" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''c_fc''' , '''fc1''' ) if "c_proj" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''c_proj''' , '''fc2''' ) if name.startswith('''transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : Any = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' ) if "attn.out_proj" in name and "message" not in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' ) if "ln_final" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''ln_final''' , '''text_model.final_layer_norm''' ) # visual encoder if name == "visual.class_embedding": __SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' ) if name == "visual.positional_embedding": __SCREAMING_SNAKE_CASE : Tuple = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' ) if name.startswith('''visual.transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : List[Any] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' ) if "visual.conv1" in name: __SCREAMING_SNAKE_CASE : Any = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' ) if "visual.ln_pre" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' ) if "visual.ln_post" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''visual.ln_post''' , 
'''vision_model.post_layernorm''' ) if "visual.proj" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''visual.proj''' , '''visual_projection.weight''' ) if "text_projection" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''text_projection''' , '''text_projection.weight''' ) # things on top if "prompts_visual_proj" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' ) if "prompts_visual_ln" in name: __SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' ) # mit if name == "mit.positional_embedding": __SCREAMING_SNAKE_CASE : Any = name.replace('''positional''' , '''position''' ) if name.startswith('''mit.resblocks''' ): __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' ) # prompts generator if name.startswith('''prompts_generator.norm''' ): __SCREAMING_SNAKE_CASE : Tuple = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' ) return name def a__ ( snake_case , snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): __SCREAMING_SNAKE_CASE : Tuple = orig_state_dict.pop(snake_case ) if "attn.in_proj" in key: __SCREAMING_SNAKE_CASE : Optional[Any] = key.split('''.''' ) if key.startswith('''visual''' ): __SCREAMING_SNAKE_CASE : List[Any] = key_split[3] __SCREAMING_SNAKE_CASE : Any = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __SCREAMING_SNAKE_CASE : Union[str, Any] = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Optional[Any] = val[ :dim ] __SCREAMING_SNAKE_CASE : Tuple = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim: ] else: if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : str = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Dict = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[-dim:] elif key.startswith('''mit''' ): __SCREAMING_SNAKE_CASE : List[str] = key_split[2] __SCREAMING_SNAKE_CASE : Union[str, Any] = config.vision_config.mit_hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : str = val[:dim, :] __SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :] __SCREAMING_SNAKE_CASE : Optional[int] = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Any = val[:dim] __SCREAMING_SNAKE_CASE : Any = val[dim : dim * 2] __SCREAMING_SNAKE_CASE : Optional[Any] = val[-dim:] else: __SCREAMING_SNAKE_CASE : Optional[Any] = key_split[2] __SCREAMING_SNAKE_CASE : Any = config.text_config.hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[:dim, :] __SCREAMING_SNAKE_CASE : int = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Dict = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Tuple = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : int = val[-dim:] else: __SCREAMING_SNAKE_CASE : int = rename_key(snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __SCREAMING_SNAKE_CASE : int = val.T __SCREAMING_SNAKE_CASE : Union[str, Any] = val return orig_state_dict def a__ ( snake_case ): """simple docstring""" if num_frames == 8: __SCREAMING_SNAKE_CASE : List[Any] = '''eating_spaghetti_8_frames.npy''' elif num_frames == 16: __SCREAMING_SNAKE_CASE : Tuple = '''eating_spaghetti.npy''' 
elif num_frames == 32: __SCREAMING_SNAKE_CASE : Dict = '''eating_spaghetti_32_frames.npy''' __SCREAMING_SNAKE_CASE : List[str] = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename=snake_case , repo_type='''dataset''' , ) __SCREAMING_SNAKE_CASE : int = np.load(snake_case ) return list(snake_case ) def a__ ( snake_case , snake_case=None , snake_case=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = { # fully supervised kinetics-400 checkpoints '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''', '''xclip-base-patch32-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth''' ), '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''', '''xclip-base-patch16-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth''' ), '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb''', '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f''', # fully supervised kinetics-600 checkpoints '''xclip-base-patch16-kinetics-600''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth''' ), '''xclip-base-patch16-kinetics-600-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth''' ), '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be''', # few shot '''xclip-base-patch16-hmdb-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth''' ), '''xclip-base-patch16-hmdb-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth''' ), '''xclip-base-patch16-hmdb-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth''' ), '''xclip-base-patch16-hmdb-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth''' ), '''xclip-base-patch16-ucf-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth''' ), '''xclip-base-patch16-ucf-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth''' ), '''xclip-base-patch16-ucf-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth''' ), '''xclip-base-patch16-ucf-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth''' ), # zero shot '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''', } __SCREAMING_SNAKE_CASE : Optional[Any] = model_to_url[model_name] __SCREAMING_SNAKE_CASE : Any = 8 if "16-frames" in model_name: __SCREAMING_SNAKE_CASE : Optional[int] = 16 elif "shot" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 32 __SCREAMING_SNAKE_CASE : List[str] = get_xclip_config(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPModel(snake_case ) model.eval() if "drive" in checkpoint_url: __SCREAMING_SNAKE_CASE : Union[str, Any] = '''pytorch_model.bin''' gdown.cached_download(snake_case , 
snake_case , quiet=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(snake_case , map_location='''cpu''' )['''model'''] else: __SCREAMING_SNAKE_CASE : str = torch.hub.load_state_dict_from_url(snake_case )['''model'''] __SCREAMING_SNAKE_CASE : List[Any] = convert_state_dict(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = XCLIPModel(snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = model.load_state_dict(snake_case , strict=snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() __SCREAMING_SNAKE_CASE : Any = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224 __SCREAMING_SNAKE_CASE : str = VideoMAEImageProcessor(size=snake_case ) __SCREAMING_SNAKE_CASE : int = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : Optional[int] = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : List[Any] = XCLIPProcessor(image_processor=snake_case , tokenizer=snake_case ) __SCREAMING_SNAKE_CASE : Dict = prepare_video(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = processor( text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=snake_case , return_tensors='''pt''' , padding=snake_case ) print('''Shape of pixel values:''' , inputs.pixel_values.shape ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Optional[Any] = model(**snake_case ) # Verify outputs __SCREAMING_SNAKE_CASE : Dict = outputs.logits_per_video __SCREAMING_SNAKE_CASE : Tuple = logits_per_video.softmax(dim=1 ) print('''Probs:''' , snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] ) elif model_name == "xclip-base-patch16": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] ) elif model_name == "xclip-large-patch14": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": __SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": __SCREAMING_SNAKE_CASE : str = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": __SCREAMING_SNAKE_CASE : int = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": 
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] ) else: raise ValueError(F'''Model name {model_name} not supported''' ) assert torch.allclose(snake_case , snake_case , atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case ) if push_to_hub: print('''Pushing model, processor and slow tokenizer files to the hub...''' ) model.push_to_hub(snake_case , organization='''nielsr''' ) processor.push_to_hub(snake_case , organization='''nielsr''' ) slow_tokenizer.push_to_hub(snake_case , organization='''nielsr''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowercase_ = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
74
1
import math


def decimal_to_octal(num: int) -> str:
    """Convert a non-negative decimal integer to its octal string representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        # Accumulate octal digits as decimal digits: digit * 10**position.
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
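# Worked example of the digit accumulation above (illustrative): for num = 65 the loop
# yields remainders 1, 0, 1 (65 % 8 = 1, then 8 % 8 = 0, then 1 % 8 = 1), accumulated as
# 1 * 10**0 + 0 * 10**1 + 1 * 10**2 = 101, so the function returns "0o101".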
74
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first `n` (right-stripped) lines of every file in `src_dir` into `dest_dir`."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
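# Usage sketch through the python-fire wrapper above; the script and directory names are
# illustrative, and the positional arguments map to (src_dir, dest_dir, n):
#
#   python minify_dataset.py ./wmt_en_ro ./wmt_en_ro_mini 128
#
# This writes the first 128 lines of every file in ./wmt_en_ro, under the same file
# names, into ./wmt_en_ro_mini.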
74
1
import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging lowercase_ = logging.get_logger(__name__) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = nn.functional.normalize(snake_case ) __SCREAMING_SNAKE_CASE : Tuple = nn.functional.normalize(snake_case ) return torch.mm(snake_case , normalized_text_embeds.t() ) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = CLIPConfig lowerCAmelCase_ = ['''CLIPEncoderLayer'''] def __init__( self : List[Any] , _A : CLIPConfig ): """simple docstring""" super().__init__(_A ) __SCREAMING_SNAKE_CASE : List[Any] = CLIPVisionModel(config.vision_config ) __SCREAMING_SNAKE_CASE : Any = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=_A ) __SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.ones(17 ) , requires_grad=_A ) __SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.ones(3 ) , requires_grad=_A ) @torch.no_grad() def UpperCAmelCase__ ( self : Union[str, Any] , _A : Optional[int] , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.vision_model(_A )[1] # pooled_output __SCREAMING_SNAKE_CASE : Optional[int] = self.visual_projection(_A ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __SCREAMING_SNAKE_CASE : Any = cosine_distance(_A , self.special_care_embeds ).cpu().float().numpy() __SCREAMING_SNAKE_CASE : Dict = cosine_distance(_A , self.concept_embeds ).cpu().float().numpy() __SCREAMING_SNAKE_CASE : Union[str, Any] = [] __SCREAMING_SNAKE_CASE : int = image_embeds.shape[0] for i in range(_A ): __SCREAMING_SNAKE_CASE : Dict = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images __SCREAMING_SNAKE_CASE : Optional[int] = 0.0 for concept_idx in range(len(special_cos_dist[0] ) ): __SCREAMING_SNAKE_CASE : Union[str, Any] = special_cos_dist[i][concept_idx] __SCREAMING_SNAKE_CASE : List[str] = self.special_care_embeds_weights[concept_idx].item() __SCREAMING_SNAKE_CASE : List[str] = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} ) __SCREAMING_SNAKE_CASE : Dict = 0.01 for concept_idx in range(len(cos_dist[0] ) ): __SCREAMING_SNAKE_CASE : Tuple = cos_dist[i][concept_idx] __SCREAMING_SNAKE_CASE : List[str] = self.concept_embeds_weights[concept_idx].item() __SCREAMING_SNAKE_CASE : Any = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(_A ) result.append(_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = [len(res['''bad_concepts'''] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def UpperCAmelCase__ ( self : str , _A : torch.FloatTensor , _A : torch.FloatTensor ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.vision_model(_A )[1] # pooled_output __SCREAMING_SNAKE_CASE : str = self.visual_projection(_A ) __SCREAMING_SNAKE_CASE 
: List[Any] = cosine_distance(_A , self.special_care_embeds ) __SCREAMING_SNAKE_CASE : Optional[Any] = cosine_distance(_A , self.concept_embeds ) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images __SCREAMING_SNAKE_CASE : int = 0.0 __SCREAMING_SNAKE_CASE : Tuple = special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.any(special_scores > 0 , dim=1 ) __SCREAMING_SNAKE_CASE : List[Any] = special_care * 0.01 __SCREAMING_SNAKE_CASE : Optional[Any] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) __SCREAMING_SNAKE_CASE : int = (cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) __SCREAMING_SNAKE_CASE : int = torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
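# Hedged usage sketch (not part of this module): in the diffusers pipelines a checker like
# the one above is typically fed CLIP pixel values plus the candidate images, roughly as
#
#   clip_input = feature_extractor(pil_images, return_tensors="pt").pixel_values
#   images, has_nsfw = safety_checker(images=np_images, clip_input=clip_input)
#
# where the second return value is the per-image list of booleans computed above; the
# variable names and the feature-extractor call are illustrative assumptions.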
74
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]] __SCREAMING_SNAKE_CASE : Tuple = DisjunctiveConstraint(_A ) self.assertTrue(isinstance(dc.token_ids , _A ) ) with self.assertRaises(_A ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(_A ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(_A ): DisjunctiveConstraint(_A ) # fails here def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [[1, 2, 3], [1, 2, 4]] __SCREAMING_SNAKE_CASE : Optional[Any] = DisjunctiveConstraint(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = dc.update(1 ) __SCREAMING_SNAKE_CASE : int = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dc.update(2 ) __SCREAMING_SNAKE_CASE : Optional[Any] = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = dc.update(3 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = stepped is True and completed is True and reset is False self.assertTrue(_A ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
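# Hedged sketch of how a DisjunctiveConstraint is consumed outside these unit tests:
# constrained beam search accepts a list of constraints via `generate`; the checkpoint and
# the phrases below are illustrative.
#
#   from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
#   from transformers.generation import DisjunctiveConstraint
#
#   tok = AutoTokenizer.from_pretrained("t5-small")
#   model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
#   constraint = DisjunctiveConstraint(
#       [tok("scream", add_special_tokens=False).input_ids,
#        tok("screaming", add_special_tokens=False).input_ids]
#   )
#   inputs = tok("translate English to German: he was loud", return_tensors="pt")
#   out = model.generate(**inputs, constraints=[constraint], num_beams=4)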
74
1
def decimal_to_binary(num: int) -> str:
    """Convert an integer to a binary string such as '0b101'."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
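# Illustrative spot checks consistent with the logic above:
#   decimal_to_binary(0)   -> "0b0"
#   decimal_to_binary(5)   -> "0b101"
#   decimal_to_binary(-3)  -> "-0b11"
#   decimal_to_binary(2.5) -> raises TypeError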
74
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowercase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") lowercase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) lowercase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''A folder containing the training data.'''} ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''A folder containing the validation data.'''} ) lowerCAmelCase_ = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) lowerCAmelCase_ = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) lowerCAmelCase_ = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = {} if self.train_dir is not None: __SCREAMING_SNAKE_CASE : Dict = self.train_dir if self.validation_dir is not None: __SCREAMING_SNAKE_CASE : Any = self.validation_dir __SCREAMING_SNAKE_CASE : List[Any] = data_files if data_files else None @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(lowerCAmelCase__ )} , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) lowerCAmelCase_ = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class __UpperCamelCase : """simple docstring""" def __init__( self : Tuple , _A : Optional[int]=192 , _A : List[Any]=32 , _A : Optional[int]=4 , _A : str=0.6 ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = input_size __SCREAMING_SNAKE_CASE : List[str] = mask_patch_size __SCREAMING_SNAKE_CASE : Dict = model_patch_size __SCREAMING_SNAKE_CASE : int = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError('''Input size must be divisible by mask patch size''' ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError('''Mask patch size must be divisible by model patch size''' ) __SCREAMING_SNAKE_CASE : Any = self.input_size // self.mask_patch_size __SCREAMING_SNAKE_CASE : Optional[Any] = self.mask_patch_size // self.model_patch_size __SCREAMING_SNAKE_CASE : int = self.rand_size**2 __SCREAMING_SNAKE_CASE : Optional[int] = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = np.random.permutation(self.token_count )[: self.mask_count] __SCREAMING_SNAKE_CASE : Union[str, Any] = np.zeros(self.token_count , dtype=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = 1 __SCREAMING_SNAKE_CASE : List[str] = mask.reshape((self.rand_size, self.rand_size) ) __SCREAMING_SNAKE_CASE : List[Any] = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack([example['''pixel_values'''] for example in examples] ) __SCREAMING_SNAKE_CASE : Any = torch.stack([example['''mask'''] for example in examples] ) return {"pixel_values": pixel_values, 
"bool_masked_pos": mask} def a__ ( ): """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __SCREAMING_SNAKE_CASE : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_mim''' , snake_case , snake_case ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = training_args.get_process_log_level() logger.setLevel(snake_case ) transformers.utils.logging.set_verbosity(snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. __SCREAMING_SNAKE_CASE : Tuple = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __SCREAMING_SNAKE_CASE : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset. __SCREAMING_SNAKE_CASE : Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. 
__SCREAMING_SNAKE_CASE : Any = None if '''validation''' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , snake_case ) and data_args.train_val_split > 0.0: __SCREAMING_SNAKE_CASE : List[str] = ds['''train'''].train_test_split(data_args.train_val_split ) __SCREAMING_SNAKE_CASE : int = split['''train'''] __SCREAMING_SNAKE_CASE : Dict = split['''test'''] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __SCREAMING_SNAKE_CASE : List[Any] = { '''cache_dir''': model_args.cache_dir, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.config_name_or_path: __SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(model_args.config_name_or_path , **snake_case ) elif model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **snake_case ) else: __SCREAMING_SNAKE_CASE : List[Any] = CONFIG_MAPPING[model_args.model_type]() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(snake_case , '''decoder_type''' ): __SCREAMING_SNAKE_CASE : Any = '''simmim''' # adapt config __SCREAMING_SNAKE_CASE : str = model_args.image_size if model_args.image_size is not None else config.image_size __SCREAMING_SNAKE_CASE : int = model_args.patch_size if model_args.patch_size is not None else config.patch_size __SCREAMING_SNAKE_CASE : str = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { '''image_size''': model_args.image_size, '''patch_size''': model_args.patch_size, '''encoder_stride''': model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: __SCREAMING_SNAKE_CASE : int = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case ) elif model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case ) else: __SCREAMING_SNAKE_CASE : List[Any] = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } __SCREAMING_SNAKE_CASE : str = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : int = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('''Training new model from scratch''' ) __SCREAMING_SNAKE_CASE : List[Any] = AutoModelForMaskedImageModeling.from_config(snake_case ) if training_args.do_train: __SCREAMING_SNAKE_CASE : Any = ds['''train'''].column_names else: __SCREAMING_SNAKE_CASE : int = ds['''validation'''].column_names if data_args.image_column_name is not None: __SCREAMING_SNAKE_CASE : List[Any] = data_args.image_column_name elif "image" in column_names: __SCREAMING_SNAKE_CASE : str = '''image''' elif "img" in column_names: 
__SCREAMING_SNAKE_CASE : List[str] = '''img''' else: __SCREAMING_SNAKE_CASE : Tuple = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py __SCREAMING_SNAKE_CASE : Any = Compose( [ Lambda(lambda snake_case : img.convert('''RGB''' ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator __SCREAMING_SNAKE_CASE : str = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(snake_case ): __SCREAMING_SNAKE_CASE : str = [transforms(snake_case ) for image in examples[image_column_name]] __SCREAMING_SNAKE_CASE : str = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError('''--do_train requires a train dataset''' ) if data_args.max_train_samples is not None: __SCREAMING_SNAKE_CASE : Dict = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError('''--do_eval requires a validation dataset''' ) if data_args.max_eval_samples is not None: __SCREAMING_SNAKE_CASE : Union[str, Any] = ( ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(snake_case ) # Initialize our trainer __SCREAMING_SNAKE_CASE : List[str] = Trainer( model=snake_case , args=snake_case , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=snake_case , data_collator=snake_case , ) # Training if training_args.do_train: __SCREAMING_SNAKE_CASE : Union[str, Any] = None if training_args.resume_from_checkpoint is not None: __SCREAMING_SNAKE_CASE : Tuple = training_args.resume_from_checkpoint elif last_checkpoint is not None: __SCREAMING_SNAKE_CASE : int = last_checkpoint __SCREAMING_SNAKE_CASE : Tuple = trainer.train(resume_from_checkpoint=snake_case ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: __SCREAMING_SNAKE_CASE : Union[str, Any] = trainer.evaluate() trainer.log_metrics('''eval''' , snake_case ) trainer.save_metrics('''eval''' , snake_case ) # Write model card and (optionally) push to hub __SCREAMING_SNAKE_CASE : Optional[Any] = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''masked-image-modeling''', '''dataset''': data_args.dataset_name, '''tags''': ['''masked-image-modeling'''], } if training_args.push_to_hub: trainer.push_to_hub(**snake_case ) else: trainer.create_model_card(**snake_case ) if __name__ == "__main__": main()
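# Hedged launch sketch for the masked-image-modeling script above; the output path and
# model_type are illustrative, and unspecified flags keep the dataclass defaults
# (dataset_name="cifar10", mask_patch_size=32, mask_ratio=0.6, train_val_split=0.15):
#
#   python run_mim.py \
#       --dataset_name cifar10 \
#       --model_type vit \
#       --output_dir ./simmim-vit \
#       --do_train --do_eval \
#       --overwrite_output_dir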
74
1
from scipy.stats import spearmanr import datasets lowercase_ = """ The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. """ lowercase_ = """ Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {'spearmanr': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results['spearmanr']) -0.7 >>> print(round(results['spearmanr_pvalue'], 2)) 0.19 """ lowercase_ = R"""\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): """simple docstring""" def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] , ) def UpperCAmelCase__ ( self : List[str] , _A : Optional[int] , _A : Tuple , _A : Any=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = spearmanr(_A , _A ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
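# For reference, with no tied ranks the statistic returned above has the closed form
# rho = 1 - 6 * sum(d_i ** 2) / (n * (n ** 2 - 1)), where d_i is the rank difference
# between the i-th prediction and the i-th reference; equivalently it is the Pearson
# correlation of the rank-transformed data, which is what scipy.stats.spearmanr computes
# and which also covers ties.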
74
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """facebook/data2vec-vision-base-ft""": ( """https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json""" ), } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''data2vec-vision''' def __init__( self : Optional[int] , _A : List[Any]=768 , _A : Any=12 , _A : str=12 , _A : Union[str, Any]=3072 , _A : Union[str, Any]="gelu" , _A : List[Any]=0.0 , _A : Dict=0.0 , _A : Dict=0.02 , _A : Any=1e-12 , _A : Optional[Any]=224 , _A : Union[str, Any]=16 , _A : Tuple=3 , _A : List[Any]=False , _A : List[str]=False , _A : Dict=False , _A : Dict=False , _A : Any=0.1 , _A : List[str]=0.1 , _A : Dict=True , _A : Dict=[3, 5, 7, 11] , _A : Union[str, Any]=[1, 2, 3, 6] , _A : Optional[Any]=True , _A : Any=0.4 , _A : List[str]=256 , _A : Any=1 , _A : Any=False , _A : Union[str, Any]=255 , **_A : Tuple , ): """simple docstring""" super().__init__(**_A ) __SCREAMING_SNAKE_CASE : Any = hidden_size __SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers __SCREAMING_SNAKE_CASE : Tuple = num_attention_heads __SCREAMING_SNAKE_CASE : List[Any] = intermediate_size __SCREAMING_SNAKE_CASE : Tuple = hidden_act __SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob __SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : List[Any] = initializer_range __SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps __SCREAMING_SNAKE_CASE : Any = image_size __SCREAMING_SNAKE_CASE : Optional[int] = patch_size __SCREAMING_SNAKE_CASE : Any = num_channels __SCREAMING_SNAKE_CASE : List[str] = use_mask_token __SCREAMING_SNAKE_CASE : List[Any] = use_absolute_position_embeddings __SCREAMING_SNAKE_CASE : Dict = use_relative_position_bias __SCREAMING_SNAKE_CASE : str = use_shared_relative_position_bias __SCREAMING_SNAKE_CASE : Union[str, Any] = layer_scale_init_value __SCREAMING_SNAKE_CASE : str = drop_path_rate __SCREAMING_SNAKE_CASE : Tuple = use_mean_pooling # decode head attributes (semantic segmentation) __SCREAMING_SNAKE_CASE : str = out_indices __SCREAMING_SNAKE_CASE : List[str] = pool_scales # auxiliary head attributes (semantic segmentation) __SCREAMING_SNAKE_CASE : Tuple = use_auxiliary_head __SCREAMING_SNAKE_CASE : Optional[Any] = auxiliary_loss_weight __SCREAMING_SNAKE_CASE : Union[str, Any] = auxiliary_channels __SCREAMING_SNAKE_CASE : List[Any] = auxiliary_num_convs __SCREAMING_SNAKE_CASE : Optional[Any] = auxiliary_concat_input __SCREAMING_SNAKE_CASE : Any = semantic_loss_ignore_index class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = version.parse('''1.11''' ) @property def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return 1e-4
74
1
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Any , _A : Any , _A : List[Any]=7 , _A : Union[str, Any]=3 , _A : str=30 , _A : int=400 , _A : Union[str, Any]=True , _A : Dict=None , _A : Any=0.9 , _A : Tuple=None , _A : Tuple=True , _A : Union[str, Any]=[0.5, 0.5, 0.5] , _A : List[Any]=[0.5, 0.5, 0.5] , ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = size if size is not None else {'''shortest_edge''': 30} __SCREAMING_SNAKE_CASE : Any = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30} __SCREAMING_SNAKE_CASE : Union[str, Any] = parent __SCREAMING_SNAKE_CASE : Dict = batch_size __SCREAMING_SNAKE_CASE : Any = num_channels __SCREAMING_SNAKE_CASE : str = min_resolution __SCREAMING_SNAKE_CASE : List[Any] = max_resolution __SCREAMING_SNAKE_CASE : Optional[int] = do_resize_and_center_crop __SCREAMING_SNAKE_CASE : List[str] = size __SCREAMING_SNAKE_CASE : List[str] = crop_pct __SCREAMING_SNAKE_CASE : Union[str, Any] = crop_size __SCREAMING_SNAKE_CASE : str = do_normalize __SCREAMING_SNAKE_CASE : List[Any] = image_mean __SCREAMING_SNAKE_CASE : int = image_std def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = PoolFormerImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = PoolFormerImageProcessingTester(self ) @property def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''do_resize_and_center_crop''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) self.assertTrue(hasattr(_A , '''crop_pct''' ) ) self.assertTrue(hasattr(_A , '''do_normalize''' ) ) self.assertTrue(hasattr(_A , '''image_mean''' ) ) self.assertTrue(hasattr(_A , '''image_std''' ) ) def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 30} ) self.assertEqual(image_processor.crop_size , {'''height''': 30, '''width''': 30} ) __SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" pass def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : 
Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : str = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
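# Hedged usage sketch outside the test harness; the dummy image and the explicit sizes are
# illustrative, mirroring the keys exercised by the tests above:
#
#   from PIL import Image
#   from transformers import PoolFormerImageProcessor
#
#   processor = PoolFormerImageProcessor(
#       size={"shortest_edge": 224}, crop_size={"height": 224, "width": 224}
#   )
#   pixel_values = processor(images=Image.new("RGB", (256, 256)), return_tensors="pt").pixel_values
#   # expected shape: (1, 3, 224, 224)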
74
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[str] , _A : Optional[int] , _A : Optional[Any]=13 , _A : List[Any]=7 , _A : List[str]=True , _A : Dict=True , _A : Tuple=False , _A : Union[str, Any]=True , _A : List[str]=99 , _A : Union[str, Any]=32 , _A : str=5 , _A : Union[str, Any]=4 , _A : int=37 , _A : int="gelu" , _A : Tuple=0.1 , _A : Dict=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : List[Any]=2 , _A : List[Any]=0.02 , _A : Any=3 , _A : Optional[int]=4 , _A : Optional[int]=None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = parent __SCREAMING_SNAKE_CASE : Optional[int] = batch_size __SCREAMING_SNAKE_CASE : str = seq_length __SCREAMING_SNAKE_CASE : int = is_training __SCREAMING_SNAKE_CASE : Union[str, Any] = use_input_mask __SCREAMING_SNAKE_CASE : str = use_token_type_ids __SCREAMING_SNAKE_CASE : Any = use_labels __SCREAMING_SNAKE_CASE : Any = vocab_size __SCREAMING_SNAKE_CASE : Optional[int] = hidden_size __SCREAMING_SNAKE_CASE : Any = num_hidden_layers __SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads __SCREAMING_SNAKE_CASE : List[str] = intermediate_size __SCREAMING_SNAKE_CASE : List[str] = hidden_act __SCREAMING_SNAKE_CASE : int = hidden_dropout_prob __SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings __SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size __SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size __SCREAMING_SNAKE_CASE : int = initializer_range __SCREAMING_SNAKE_CASE : List[Any] = num_labels __SCREAMING_SNAKE_CASE : List[Any] = num_choices __SCREAMING_SNAKE_CASE : Union[str, Any] = scope def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_input_mask: __SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE : Any = None __SCREAMING_SNAKE_CASE : Union[str, Any] = None __SCREAMING_SNAKE_CASE : int = None if self.use_labels: __SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE : Dict = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , 
hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : Optional[int] , _A : int , _A : Union[str, Any] , _A : List[str] , _A : Dict , _A : Dict , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , _A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Tuple , _A : Dict , _A : Tuple , _A : str , _A : Optional[int] , _A : List[str] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForMaskedLM(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Tuple = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Dict , _A : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any] , _A : Optional[Any] , _A : str , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForQuestionAnswering(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : int = model( _A , attention_mask=_A , start_positions=_A , end_positions=_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : Dict , _A : List[str] , _A : Tuple , _A : str , _A : Tuple , _A : Optional[int] , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels __SCREAMING_SNAKE_CASE : List[Any] = DistilBertForSequenceClassification(_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : List[str] , _A : int , _A : List[Any] , _A : Any , _A : Any , _A : str , _A : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForTokenClassification(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Dict , _A : Optional[int] , _A : int , _A : Optional[int] , _A : List[Any] , _A : int , _A : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.num_choices __SCREAMING_SNAKE_CASE : int = DistilBertForMultipleChoice(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE : Optional[Any] = model( _A , attention_mask=_A , labels=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs() ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), 
(__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : List[Any] = config_and_inputs __SCREAMING_SNAKE_CASE : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) lowerCAmelCase_ = ( { '''feature-extraction''': DistilBertModel, '''fill-mask''': DistilBertForMaskedLM, '''question-answering''': DistilBertForQuestionAnswering, '''text-classification''': DistilBertForSequenceClassification, '''token-classification''': DistilBertForTokenClassification, '''zero-shot''': DistilBertForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = True def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = DistilBertModelTester(self ) __SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=_A , dim=37 ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*_A ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*_A ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*_A ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*_A ) @slow def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @slow @require_torch_gpu def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == DistilBertForMultipleChoice: return __SCREAMING_SNAKE_CASE : Dict = True __SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(config=_A ) __SCREAMING_SNAKE_CASE : int = self._prepare_for_class(_A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = torch.jit.trace( _A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(_A , os.path.join(_A , '''traced_model.pt''' ) ) __SCREAMING_SNAKE_CASE : Optional[int] = torch.jit.load(os.path.join(_A , '''traced_model.pt''' ) , map_location=_A ) loaded(inputs_dict['''input_ids'''].to(_A ) , inputs_dict['''attention_mask'''].to(_A ) ) @require_torch class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A , attention_mask=_A )[0] __SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , _A ) __SCREAMING_SNAKE_CASE : Any = torch.tensor( [[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1e-4 ) )
74
1
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer lowercase_ = logging.get_logger(__name__) lowercase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} lowercase_ = { """vocab_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } lowercase_ = { """vocab_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } lowercase_ = { """vocab_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json""" ), }, } lowercase_ = { """facebook/dpr-ctx_encoder-single-nq-base""": 512, """facebook/dpr-ctx_encoder-multiset-base""": 512, } lowercase_ = { """facebook/dpr-question_encoder-single-nq-base""": 512, """facebook/dpr-question_encoder-multiset-base""": 512, } lowercase_ = { """facebook/dpr-reader-single-nq-base""": 512, """facebook/dpr-reader-multiset-base""": 512, } lowercase_ = { """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True}, } lowercase_ = { """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True}, } lowercase_ = { """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True}, } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES 
lowerCAmelCase_ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION lowerCAmelCase_ = DPRContextEncoderTokenizer class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION lowerCAmelCase_ = DPRQuestionEncoderTokenizer lowercase_ = collections.namedtuple( """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""] ) lowercase_ = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""]) lowercase_ = R""" Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). 
max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. """ @add_start_docstrings(lowerCAmelCase__ ) class __UpperCamelCase : """simple docstring""" def __call__( self : List[str] , _A : Tuple , _A : Optional[str] = None , _A : Optional[str] = None , _A : Union[bool, str] = False , _A : Union[bool, str] = False , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[bool] = None , **_A : Dict , ): """simple docstring""" if titles is None and texts is None: return super().__call__( _A , padding=_A , truncation=_A , max_length=_A , return_tensors=_A , return_attention_mask=_A , **_A , ) elif titles is None or texts is None: __SCREAMING_SNAKE_CASE : Tuple = titles if texts is None else texts return super().__call__( _A , _A , padding=_A , truncation=_A , max_length=_A , return_tensors=_A , return_attention_mask=_A , **_A , ) __SCREAMING_SNAKE_CASE : List[Any] = titles if not isinstance(_A , _A ) else [titles] __SCREAMING_SNAKE_CASE : Any = texts if not isinstance(_A , _A ) else [texts] __SCREAMING_SNAKE_CASE : List[Any] = len(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = questions if not isinstance(_A , _A ) else [questions] * n_passages assert len(_A ) == len( _A ), F'''There should be as many titles than texts but got {len(_A )} titles and {len(_A )} texts.''' __SCREAMING_SNAKE_CASE : Tuple = super().__call__(_A , _A , padding=_A , truncation=_A )['''input_ids'''] __SCREAMING_SNAKE_CASE : Any = super().__call__(_A , add_special_tokens=_A , padding=_A , truncation=_A )['''input_ids'''] __SCREAMING_SNAKE_CASE : str = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(_A , _A ) ] } if return_attention_mask is not False: __SCREAMING_SNAKE_CASE : Union[str, Any] = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __SCREAMING_SNAKE_CASE : str = attention_mask return self.pad(_A , padding=_A , max_length=_A , return_tensors=_A ) def UpperCAmelCase__ ( self : int , _A : BatchEncoding , _A : DPRReaderOutput , _A : int = 16 , _A : int = 64 , _A : int = 4 , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = reader_input['''input_ids'''] __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, 
__SCREAMING_SNAKE_CASE : List[Any] = reader_output[:3] __SCREAMING_SNAKE_CASE : int = len(_A ) __SCREAMING_SNAKE_CASE : str = sorted(range(_A ) , reverse=_A , key=relevance_logits.__getitem__ ) __SCREAMING_SNAKE_CASE : List[DPRReaderOutput] = [] for doc_id in sorted_docs: __SCREAMING_SNAKE_CASE : Optional[int] = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __SCREAMING_SNAKE_CASE : Optional[int] = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __SCREAMING_SNAKE_CASE : int = sequence_ids.index(self.pad_token_id ) else: __SCREAMING_SNAKE_CASE : Union[str, Any] = len(_A ) __SCREAMING_SNAKE_CASE : Tuple = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_A , top_spans=_A , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_A , start_index=_A , end_index=_A , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(_A ) >= num_spans: break return nbest_spans_predictions[:num_spans] def UpperCAmelCase__ ( self : List[str] , _A : List[int] , _A : List[int] , _A : int , _A : int , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [] for start_index, start_score in enumerate(_A ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __SCREAMING_SNAKE_CASE : Optional[Any] = sorted(_A , key=lambda _A : x[1] , reverse=_A ) __SCREAMING_SNAKE_CASE : int = [] for (start_index, end_index), score in scores: assert start_index <= end_index, F'''Wrong span indices: [{start_index}:{end_index}]''' __SCREAMING_SNAKE_CASE : Dict = end_index - start_index + 1 assert length <= max_answer_length, F'''Span is too long: {length} > {max_answer_length}''' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(_A ) == top_spans: break return chosen_span_intervals @add_end_docstrings(lowerCAmelCase__ ) class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = READER_PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = READER_PRETRAINED_INIT_CONFIGURATION lowerCAmelCase_ = ['''input_ids''', '''attention_mask'''] lowerCAmelCase_ = DPRReaderTokenizer
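# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how the reader tokenizer defined above is typically paired
# with a DPR reader model: encode one question against several passages, then let
# decode_best_spans rank answer spans. The checkpoint name comes from the pretrained
# maps above; importing DPRReader / DPRReaderTokenizerFast from transformers is an
# assumption based on the library's public API, and running this downloads weights.
from transformers import DPRReader, DPRReaderTokenizerFast

tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded_inputs = tokenizer(
    questions="What is love?",
    titles=["Haddaway", "Love (disambiguation)"],
    texts=["'What Is Love' is a song recorded by Haddaway.", "Love is an emotion."],
    padding=True,
    return_tensors="pt",
)
outputs = model(**encoded_inputs)
for span in tokenizer.decode_best_spans(encoded_inputs, outputs, num_spans=3):
    print(span.doc_id, span.relevance_score, span.text)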
74
import logging import os import threading import time try: import warnings except ImportError: lowercase_ = None try: import msvcrt except ImportError: lowercase_ = None try: import fcntl except ImportError: lowercase_ = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: lowercase_ = OSError # Data # ------------------------------------------------ lowercase_ = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] lowercase_ = """3.0.12""" lowercase_ = None def a__ ( ): """simple docstring""" global _logger __SCREAMING_SNAKE_CASE : Optional[Any] = _logger or logging.getLogger(__name__ ) return _logger class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[Any] , _A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = lock_file return None def __str__( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = F'''The file lock \'{self.lock_file}\' could not be acquired.''' return temp class __UpperCamelCase : """simple docstring""" def __init__( self : Optional[Any] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = lock return None def __enter__( self : Any ): """simple docstring""" return self.lock def __exit__( self : str , _A : Any , _A : int , _A : Any ): """simple docstring""" self.lock.release() return None class __UpperCamelCase : """simple docstring""" def __init__( self : Any , _A : int , _A : Optional[int]=-1 , _A : List[Any]=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long __SCREAMING_SNAKE_CASE : Optional[Any] = self.hash_filename_if_too_long(_A , _A ) # The path to the lock file. __SCREAMING_SNAKE_CASE : Tuple = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __SCREAMING_SNAKE_CASE : str = None # The default timeout value. __SCREAMING_SNAKE_CASE : Any = timeout # We use this lock primarily for the lock counter. __SCREAMING_SNAKE_CASE : int = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __SCREAMING_SNAKE_CASE : int = 0 return None @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._lock_file @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._timeout @timeout.setter def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = float(_A ) return None def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" raise NotImplementedError() def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" raise NotImplementedError() @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._lock_file_fd is not None def UpperCAmelCase__ ( self : Tuple , _A : List[Any]=None , _A : Optional[Any]=0.05 ): """simple docstring""" if timeout is None: __SCREAMING_SNAKE_CASE : Optional[int] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __SCREAMING_SNAKE_CASE : Tuple = id(self ) __SCREAMING_SNAKE_CASE : Any = self._lock_file __SCREAMING_SNAKE_CASE : Union[str, Any] = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F'''Attempting to acquire lock {lock_id} on {lock_filename}''' ) self._acquire() if self.is_locked: logger().debug(F'''Lock {lock_id} acquired on {lock_filename}''' ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F'''Timeout on acquiring lock {lock_id} on {lock_filename}''' ) raise Timeout(self._lock_file ) else: logger().debug( F'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' ) time.sleep(_A ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __SCREAMING_SNAKE_CASE : Optional[Any] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def UpperCAmelCase__ ( self : int , _A : List[str]=False ): """simple docstring""" with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __SCREAMING_SNAKE_CASE : Optional[int] = id(self ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self._lock_file logger().debug(F'''Attempting to release lock {lock_id} on {lock_filename}''' ) self._release() __SCREAMING_SNAKE_CASE : int = 0 logger().debug(F'''Lock {lock_id} released on {lock_filename}''' ) return None def __enter__( self : int ): """simple docstring""" self.acquire() return self def __exit__( self : Optional[int] , _A : List[str] , _A : List[Any] , _A : int ): """simple docstring""" self.release() return None def __del__( self : int ): """simple docstring""" self.release(force=_A ) return None def UpperCAmelCase__ ( self : Optional[int] , _A : str , _A : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = os.path.basename(_A ) if len(_A ) > max_length and max_length > 0: __SCREAMING_SNAKE_CASE : Tuple = os.path.dirname(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = str(hash(_A ) ) __SCREAMING_SNAKE_CASE : Optional[int] = filename[: max_length - len(_A ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(_A , _A ) else: return path class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[Any] , _A : Optional[Any] , _A : List[Any]=-1 , _A : Dict=None ): """simple docstring""" from .file_utils import relative_to_absolute_path super().__init__(_A , timeout=_A , max_filename_length=_A ) __SCREAMING_SNAKE_CASE : str = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __SCREAMING_SNAKE_CASE : List[str] = os.open(self._lock_file , _A ) except OSError: pass else: try: msvcrt.locking(_A , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(_A ) else: __SCREAMING_SNAKE_CASE : str = fd return None def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self._lock_file_fd __SCREAMING_SNAKE_CASE : int = None msvcrt.locking(_A , msvcrt.LK_UNLCK , 1 ) os.close(_A ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Tuple , _A : Optional[int] , _A : Dict=-1 , _A : str=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = os.statvfs(os.path.dirname(_A ) ).f_namemax super().__init__(_A , timeout=_A , max_filename_length=_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = os.O_RDWR | os.O_CREAT | os.O_TRUNC __SCREAMING_SNAKE_CASE : int = os.open(self._lock_file , _A ) try: fcntl.flock(_A , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(_A ) else: __SCREAMING_SNAKE_CASE : int = fd return None def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self._lock_file_fd __SCREAMING_SNAKE_CASE : Any = None fcntl.flock(_A , fcntl.LOCK_UN ) os.close(_A ) return None class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __SCREAMING_SNAKE_CASE : Optional[Any] = os.open(self._lock_file , _A ) except OSError: pass else: __SCREAMING_SNAKE_CASE : List[str] = fd return None def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" os.close(self._lock_file_fd ) __SCREAMING_SNAKE_CASE : Optional[Any] = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None lowercase_ = None if msvcrt: lowercase_ = WindowsFileLock elif fcntl: lowercase_ = UnixFileLock else: lowercase_ = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
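# --- Illustrative usage sketch (not part of the original module) ---
# The classes above are a vendored copy of py-filelock 3.0.12; the snippet below
# shows the intended usage pattern through the standalone `filelock` package, whose
# public names (FileLock, Timeout) match this module's __all__. Treat the package
# import as an assumption for illustration; inside the library the vendored class
# would be imported from its own utils instead. Acquiring is reentrant (a counter
# tracks nested acquires) and timeout=-1 means "wait forever".
from filelock import FileLock, Timeout

lock = FileLock("model.bin.lock", timeout=5)
try:
    with lock:  # __enter__ acquires, __exit__ releases
        with open("model.bin", "wb") as f:
            f.write(b"...")
except Timeout:
    print("Another process is holding model.bin.lock")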
74
1
import os from collections import deque import torch from torch.utils.data import Dataset class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[str] , _A : Dict="" , _A : Any="train" ): """simple docstring""" assert os.path.isdir(_A ) __SCREAMING_SNAKE_CASE : int = [] __SCREAMING_SNAKE_CASE : List[Any] = os.listdir(_A ) for story_filename in story_filenames_list: if "summary" in story_filename: continue __SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(_A , _A ) if not os.path.isfile(_A ): continue self.documents.append(_A ) def __len__( self : Tuple ): """simple docstring""" return len(self.documents ) def __getitem__( self : Any , _A : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.documents[idx] __SCREAMING_SNAKE_CASE : int = document_path.split('''/''' )[-1] with open(_A , encoding='''utf-8''' ) as source: __SCREAMING_SNAKE_CASE : List[str] = source.read() __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = process_story(_A ) return document_name, story_lines, summary_lines def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = list(filter(lambda snake_case : len(snake_case ) != 0 , [line.strip() for line in raw_story.split('''\n''' )] ) ) # for some unknown reason some lines miss a period, add it __SCREAMING_SNAKE_CASE : List[Any] = [_add_missing_period(snake_case ) for line in nonempty_lines] # gather article lines __SCREAMING_SNAKE_CASE : str = [] __SCREAMING_SNAKE_CASE : int = deque(snake_case ) while True: try: __SCREAMING_SNAKE_CASE : str = lines.popleft() if element.startswith('''@highlight''' ): break story_lines.append(snake_case ) except IndexError: # if "@highlight" is absent from the file we pop # all elements until there is None, raising an exception. return story_lines, [] # gather summary lines __SCREAMING_SNAKE_CASE : List[str] = list(filter(lambda snake_case : not t.startswith('''@highlight''' ) , snake_case ) ) return story_lines, summary_lines def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''.''', '''!''', '''?''', '''...''', '''\'''', '''`''', '''"''', '''\u2019''', '''\u2019''', ''')'''] if line.startswith('''@highlight''' ): return line if line[-1] in END_TOKENS: return line return line + "." 
def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" if len(snake_case ) > block_size: return sequence[:block_size] else: sequence.extend([pad_token_id] * (block_size - len(snake_case )) ) return sequence def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = torch.ones_like(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = sequence == pad_token_id __SCREAMING_SNAKE_CASE : Optional[Any] = 0 return mask def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [tokenizer.encode(snake_case ) for line in story_lines] __SCREAMING_SNAKE_CASE : List[Any] = [token for sentence in story_lines_token_ids for token in sentence] __SCREAMING_SNAKE_CASE : str = [tokenizer.encode(snake_case ) for line in summary_lines] __SCREAMING_SNAKE_CASE : List[Any] = [token for sentence in summary_lines_token_ids for token in sentence] return story_token_ids, summary_token_ids def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [] for sequence in batch: __SCREAMING_SNAKE_CASE : List[Any] = -1 __SCREAMING_SNAKE_CASE : Optional[int] = [] for s in sequence: if s == separator_token_id: sentence_num += 1 embeddings.append(sentence_num % 2 ) batch_embeddings.append(snake_case ) return torch.tensor(snake_case )
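# --- Illustrative sketch (not part of the original module) ---
# The helpers above split a raw CNN/DailyMail story file into article lines and
# "@highlight" summary lines, pad or truncate token sequences to a block size, and
# build segment ids that alternate at every separator token. The tiny standalone
# re-implementation below (a hypothetical _demo_split_story helper) only mirrors the
# story/summary split, to make the expected input and output concrete.
def _demo_split_story(raw_story: str):
    lines = [line.strip() for line in raw_story.split("\n") if line.strip()]
    story, summary = [], []
    bucket = story
    for line in lines:
        if line.startswith("@highlight"):
            bucket = summary  # everything after the first "@highlight" marker
            continue
        bucket.append(line)
    return story, summary

print(_demo_split_story("First sentence.\nSecond sentence.\n@highlight\nThe gist."))
# -> (['First sentence.', 'Second sentence.'], ['The gist.'])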
74
import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup lowercase_ = logging.get_logger(__name__) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Optional[Any] , **_A : Dict ): """simple docstring""" requires_backends(self , ['''bs4'''] ) super().__init__(**_A ) def UpperCAmelCase__ ( self : Optional[int] , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = [] __SCREAMING_SNAKE_CASE : Any = [] __SCREAMING_SNAKE_CASE : Union[str, Any] = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag __SCREAMING_SNAKE_CASE : Optional[int] = parent.find_all(child.name , recursive=_A ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(_A ) else next(i for i, s in enumerate(_A , 1 ) if s is child ) ) __SCREAMING_SNAKE_CASE : Any = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def UpperCAmelCase__ ( self : Dict , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BeautifulSoup(_A , '''html.parser''' ) __SCREAMING_SNAKE_CASE : str = [] __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : int = [] for element in html_code.descendants: if type(_A ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue __SCREAMING_SNAKE_CASE : List[Any] = html.unescape(_A ).strip() if not text_in_this_tag: continue all_doc_strings.append(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = self.xpath_soup(_A ) stringaxtag_seq.append(_A ) stringaxsubs_seq.append(_A ) if len(_A ) != len(_A ): raise ValueError('''Number of doc strings and xtags does not correspond''' ) if len(_A ) != len(_A ): raise ValueError('''Number of doc strings and xsubs does not correspond''' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def UpperCAmelCase__ ( self : int , _A : Tuple , _A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = '''''' for tagname, subs in zip(_A , _A ): xpath += F'''/{tagname}''' if subs != 0: xpath += F'''[{subs}]''' return xpath def __call__( self : Optional[int] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = False # Check that strings has a valid type if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Any = True elif isinstance(_A , (list, tuple) ): if len(_A ) == 0 or isinstance(html_strings[0] , _A ): __SCREAMING_SNAKE_CASE : List[Any] = True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' F'''but is of type {type(_A )}.''' ) __SCREAMING_SNAKE_CASE : Any = bool(isinstance(_A , (list, tuple) ) and (isinstance(html_strings[0] , _A )) ) if not is_batched: __SCREAMING_SNAKE_CASE : Dict = [html_strings] # Get nodes + xpaths __SCREAMING_SNAKE_CASE : str = [] __SCREAMING_SNAKE_CASE : Tuple = [] for html_string in html_strings: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_three_from_single(_A ) nodes.append(_A ) __SCREAMING_SNAKE_CASE : Dict = [] for node, tag_list, sub_list in zip(_A , _A , _A ): __SCREAMING_SNAKE_CASE : List[Any] = self.construct_xpath(_A , _A ) xpath_strings.append(_A ) xpaths.append(_A ) # return as Dict __SCREAMING_SNAKE_CASE : Optional[int] = {'''nodes''': nodes, '''xpaths''': xpaths} 
__SCREAMING_SNAKE_CASE : List[str] = BatchFeature(data=_A , tensor_type=_A ) return encoded_inputs
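# --- Illustrative sketch (not part of the original module) ---
# The feature extractor above walks an HTML tree with BeautifulSoup and, for every
# text node, records each ancestor tag plus its 1-based index among same-named
# siblings, then joins them into an xpath such as "/html/body/p[2]". A standalone
# demonstration of that xpath construction (names like `node`, `tags`, `subs` are
# local to this sketch; requires bs4):
from bs4 import BeautifulSoup

html = "<html><body><p>first</p><p>second</p></body></html>"
soup = BeautifulSoup(html, "html.parser")
node = soup.find_all("p")[1]  # the second <p>

tags, subs = [], []
child = node
for parent in child.parents:
    siblings = parent.find_all(child.name, recursive=False)
    tags.append(child.name)
    subs.append(0 if len(siblings) == 1 else next(i for i, s in enumerate(siblings, 1) if s is child))
    child = parent
tags.reverse()
subs.reverse()

xpath = "".join(f"/{t}" + (f"[{s}]" if s else "") for t, s in zip(tags, subs))
print(xpath)  # /html/body/p[2]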
74
1
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """Helsinki-NLP/opus-mt-en-de""": """https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json""", # See all Marian models at https://huggingface.co/models?filter=marian } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''marian''' lowerCAmelCase_ = ['''past_key_values'''] lowerCAmelCase_ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : Dict , _A : Tuple=5_8101 , _A : Tuple=None , _A : Tuple=1024 , _A : Union[str, Any]=12 , _A : Dict=4096 , _A : Union[str, Any]=16 , _A : int=12 , _A : List[str]=4096 , _A : Optional[int]=16 , _A : Optional[Any]=0.0 , _A : Any=0.0 , _A : Optional[Any]=True , _A : Dict=True , _A : Any="gelu" , _A : int=1024 , _A : Dict=0.1 , _A : Any=0.0 , _A : str=0.0 , _A : Optional[int]=0.02 , _A : int=5_8100 , _A : Optional[Any]=False , _A : Optional[int]=5_8100 , _A : Optional[Any]=0 , _A : Optional[Any]=0 , _A : Any=True , **_A : List[Any] , ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = vocab_size __SCREAMING_SNAKE_CASE : Optional[Any] = decoder_vocab_size or vocab_size __SCREAMING_SNAKE_CASE : str = max_position_embeddings __SCREAMING_SNAKE_CASE : Optional[Any] = d_model __SCREAMING_SNAKE_CASE : Any = encoder_ffn_dim __SCREAMING_SNAKE_CASE : str = encoder_layers __SCREAMING_SNAKE_CASE : Dict = encoder_attention_heads __SCREAMING_SNAKE_CASE : Any = decoder_ffn_dim __SCREAMING_SNAKE_CASE : int = decoder_layers __SCREAMING_SNAKE_CASE : Optional[int] = decoder_attention_heads __SCREAMING_SNAKE_CASE : Dict = dropout __SCREAMING_SNAKE_CASE : Optional[Any] = attention_dropout __SCREAMING_SNAKE_CASE : Any = activation_dropout __SCREAMING_SNAKE_CASE : Tuple = activation_function __SCREAMING_SNAKE_CASE : Any = init_std __SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_layerdrop __SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_layerdrop __SCREAMING_SNAKE_CASE : Any = use_cache __SCREAMING_SNAKE_CASE : str = encoder_layers __SCREAMING_SNAKE_CASE : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True __SCREAMING_SNAKE_CASE : Union[str, Any] = share_encoder_decoder_embeddings super().__init__( pad_token_id=_A , eos_token_id=_A , is_encoder_decoder=_A , decoder_start_token_id=_A , forced_eos_token_id=_A , **_A , ) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def UpperCAmelCase__ ( self : Dict ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: __SCREAMING_SNAKE_CASE : Union[str, Any] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: __SCREAMING_SNAKE_CASE : List[str] = {0: '''batch'''} __SCREAMING_SNAKE_CASE : Optional[int] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: __SCREAMING_SNAKE_CASE : int = {0: '''batch''', 1: '''decoder_sequence'''} __SCREAMING_SNAKE_CASE : int = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: 
self.fill_with_past_key_values_(_A , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. __SCREAMING_SNAKE_CASE : Optional[int] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = self.num_layers for i in range(_A ): __SCREAMING_SNAKE_CASE : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''} __SCREAMING_SNAKE_CASE : Dict = {0: '''batch''', 2: '''past_sequence + sequence'''} else: __SCREAMING_SNAKE_CASE : Optional[int] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def UpperCAmelCase__ ( self : int ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: __SCREAMING_SNAKE_CASE : Optional[Any] = super().outputs else: __SCREAMING_SNAKE_CASE : List[Any] = super(_A , self ).outputs if self.use_past: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = self.num_layers for i in range(_A ): __SCREAMING_SNAKE_CASE : str = {0: '''batch''', 2: '''past_sequence + sequence'''} __SCREAMING_SNAKE_CASE : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def UpperCAmelCase__ ( self : Tuple , _A : PreTrainedTokenizer , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional[TensorType] = None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = self._generate_dummy_inputs_for_encoder_and_decoder( _A , _A , _A , _A , _A ) # Generate decoder inputs __SCREAMING_SNAKE_CASE : int = seq_length if not self.use_past else 1 __SCREAMING_SNAKE_CASE : int = self._generate_dummy_inputs_for_encoder_and_decoder( _A , _A , _A , _A , _A ) __SCREAMING_SNAKE_CASE : Any = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} __SCREAMING_SNAKE_CASE : Union[str, Any] = dict(**_A , **_A ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = common_inputs['''input_ids'''].shape __SCREAMING_SNAKE_CASE : int = common_inputs['''decoder_input_ids'''].shape[1] __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = self.num_attention_heads __SCREAMING_SNAKE_CASE : Optional[Any] = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) __SCREAMING_SNAKE_CASE : int = decoder_seq_length + 3 __SCREAMING_SNAKE_CASE : int = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) __SCREAMING_SNAKE_CASE : List[str] = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(_A , _A )] , dim=1 ) __SCREAMING_SNAKE_CASE : Optional[Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = self.num_layers __SCREAMING_SNAKE_CASE : int = min(_A , _A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = max(_A , _A ) - min_num_layers __SCREAMING_SNAKE_CASE 
: List[str] = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(_A ): common_inputs["past_key_values"].append( ( torch.zeros(_A ), torch.zeros(_A ), torch.zeros(_A ), torch.zeros(_A ), ) ) # TODO: test this. __SCREAMING_SNAKE_CASE : Optional[Any] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(_A , _A ): common_inputs["past_key_values"].append((torch.zeros(_A ), torch.zeros(_A )) ) return common_inputs def UpperCAmelCase__ ( self : Optional[Any] , _A : PreTrainedTokenizer , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional[TensorType] = None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = self._generate_dummy_inputs_for_encoder_and_decoder( _A , _A , _A , _A , _A ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values __SCREAMING_SNAKE_CASE : int = seqlen + 2 __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = self.num_layers __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_attention_heads __SCREAMING_SNAKE_CASE : List[str] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) __SCREAMING_SNAKE_CASE : Any = common_inputs['''attention_mask'''].dtype __SCREAMING_SNAKE_CASE : Tuple = torch.cat( [common_inputs['''attention_mask'''], torch.ones(_A , _A , dtype=_A )] , dim=1 ) __SCREAMING_SNAKE_CASE : Any = [ (torch.zeros(_A ), torch.zeros(_A )) for _ in range(_A ) ] return common_inputs def UpperCAmelCase__ ( self : Any , _A : PreTrainedTokenizer , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional[TensorType] = None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = compute_effective_axis_dimension( _A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __SCREAMING_SNAKE_CASE : str = tokenizer.num_special_tokens_to_add(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = compute_effective_axis_dimension( _A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_A ) # Generate dummy inputs according to compute batch and sequence __SCREAMING_SNAKE_CASE : int = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size __SCREAMING_SNAKE_CASE : Union[str, Any] = dict(tokenizer(_A , return_tensors=_A ) ) return common_inputs def UpperCAmelCase__ ( self : str , _A : PreTrainedTokenizer , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional[TensorType] = None , ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: __SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( _A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A ) else: __SCREAMING_SNAKE_CASE : int = self._generate_dummy_inputs_for_causal_lm( _A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A ) return common_inputs def UpperCAmelCase__ ( self : Tuple , _A : int , _A : Optional[Any] , _A : List[str] , _A : List[str] ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: __SCREAMING_SNAKE_CASE : Optional[Any] = super()._flatten_past_key_values_(_A , _A , _A , _A ) else: __SCREAMING_SNAKE_CASE : 
List[Any] = super(_A , self )._flatten_past_key_values_( _A , _A , _A , _A ) @property def UpperCAmelCase__ ( self : int ): """simple docstring""" return 1e-4
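# --- Illustrative sketch (not part of the original module) ---
# The attribute_map declared above aliases the generic `num_attention_heads` and
# `hidden_size` names onto Marian's own `encoder_attention_heads` and `d_model`, so
# generic code paths can read either spelling. A quick check of that mapping; the
# import assumes the public transformers API:
from transformers import MarianConfig

cfg = MarianConfig(d_model=512, encoder_attention_heads=8, decoder_attention_heads=8)
print(cfg.hidden_size)           # 512 - resolved through attribute_map to d_model
print(cfg.num_attention_heads)   # 8   - resolved to encoder_attention_heads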
74
import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger() def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case = True ): """simple docstring""" print(F'''Converting {name}...''' ) with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": __SCREAMING_SNAKE_CASE : Tuple = timm.create_model('''levit_128s''' , pretrained=snake_case ) else: __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_128''' , pretrained=snake_case ) if hidden_sizes == 192: __SCREAMING_SNAKE_CASE : Dict = timm.create_model('''levit_192''' , pretrained=snake_case ) if hidden_sizes == 256: __SCREAMING_SNAKE_CASE : Optional[int] = timm.create_model('''levit_256''' , pretrained=snake_case ) if hidden_sizes == 384: __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_384''' , pretrained=snake_case ) from_model.eval() __SCREAMING_SNAKE_CASE : str = LevitForImageClassificationWithTeacher(snake_case ).eval() __SCREAMING_SNAKE_CASE : int = OrderedDict() __SCREAMING_SNAKE_CASE : List[Any] = from_model.state_dict() __SCREAMING_SNAKE_CASE : Tuple = list(from_model.state_dict().keys() ) __SCREAMING_SNAKE_CASE : str = list(our_model.state_dict().keys() ) print(len(snake_case ) , len(snake_case ) ) for i in range(len(snake_case ) ): __SCREAMING_SNAKE_CASE : int = weights[og_keys[i]] our_model.load_state_dict(snake_case ) __SCREAMING_SNAKE_CASE : str = torch.randn((2, 3, 224, 224) ) __SCREAMING_SNAKE_CASE : Tuple = from_model(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = our_model(snake_case ).logits assert torch.allclose(snake_case , snake_case ), "The model logits don't match the original one." 
__SCREAMING_SNAKE_CASE : Union[str, Any] = name print(snake_case ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) __SCREAMING_SNAKE_CASE : Union[str, Any] = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F'''Pushed {checkpoint_name}''' ) def a__ ( snake_case , snake_case = None , snake_case = True ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = '''imagenet-1k-id2label.json''' __SCREAMING_SNAKE_CASE : int = 1_000 __SCREAMING_SNAKE_CASE : Optional[int] = (1, num_labels) __SCREAMING_SNAKE_CASE : Any = '''huggingface/label-files''' __SCREAMING_SNAKE_CASE : Optional[Any] = num_labels __SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='''dataset''' ) , '''r''' ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {int(snake_case ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : str = idalabel __SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : List[str] = partial(snake_case , num_labels=snake_case , idalabel=snake_case , labelaid=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = { '''levit-128S''': 128, '''levit-128''': 128, '''levit-192''': 192, '''levit-256''': 256, '''levit-384''': 384, } __SCREAMING_SNAKE_CASE : Optional[int] = { '''levit-128S''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-128''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-192''': ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-256''': ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-384''': ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , snake_case , names_to_config[model_name] , snake_case , snake_case ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , snake_case , snake_case , snake_case , snake_case ) return config, expected_shape if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""levit-dump-folder/""", type=Path, required=False, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) lowercase_ = parser.parse_args() lowercase_ = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
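# --- Illustrative usage note (not part of the original script) ---
# The conversion above is meant to be run from the command line; the script filename
# below is an assumption for illustration. --model_name must be one of the keys of
# the hidden-size table (levit-128S, levit-128, levit-192, levit-256, levit-384);
# omitting it converts and pushes every size in turn.
#
#   python convert_levit_timm_to_pytorch.py \
#       --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ \
#       --push_to_hub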
74
1
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = """▁""" lowercase_ = {"""vocab_file""": """sentencepiece.bpe.model"""} lowercase_ = { """vocab_file""": { """facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""", } } lowercase_ = { """facebook/xglm-564M""": 2_048, } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = ['''input_ids''', '''attention_mask'''] def __init__( self : Any , _A : str , _A : List[str]="<s>" , _A : str="</s>" , _A : Optional[Any]="</s>" , _A : Optional[Any]="<s>" , _A : int="<unk>" , _A : Optional[Any]="<pad>" , _A : Optional[Dict[str, Any]] = None , **_A : Any , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer __SCREAMING_SNAKE_CASE : Union[str, Any] = 7 __SCREAMING_SNAKE_CASE : List[str] = [F'''<madeupword{i}>''' for i in range(self.num_madeup_words )] __SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''additional_special_tokens''' , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , ) __SCREAMING_SNAKE_CASE : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_A ) ) __SCREAMING_SNAKE_CASE : Dict = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __SCREAMING_SNAKE_CASE : Optional[Any] = 1 # Mimic fairseq token-to-id alignment for the first 4 token __SCREAMING_SNAKE_CASE : int = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} __SCREAMING_SNAKE_CASE : Optional[Any] = len(self.sp_model ) __SCREAMING_SNAKE_CASE : Any = {F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(_A ) __SCREAMING_SNAKE_CASE : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.__dict__.copy() __SCREAMING_SNAKE_CASE : Optional[Any] = None __SCREAMING_SNAKE_CASE : Optional[Any] = self.sp_model.serialized_model_proto() return state def __setstate__( self : Optional[int] , _A : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __SCREAMING_SNAKE_CASE : Union[str, Any] = {} __SCREAMING_SNAKE_CASE : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def UpperCAmelCase__ ( self : Any , _A : List[int] , _A : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return [self.sep_token_id] + token_ids_a __SCREAMING_SNAKE_CASE : str = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def UpperCAmelCase__ ( self : List[Any] , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A ) if token_ids_a is None: return [1] + ([0] * len(_A )) return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) def UpperCAmelCase__ ( self : Tuple , _A : List[int] , _A : Optional[List[int]] = None ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def UpperCAmelCase__ ( self : int ): """simple docstring""" return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCAmelCase__ ( self : List[Any] , _A : str ): """simple docstring""" return self.sp_model.encode(_A , out_type=_A ) def UpperCAmelCase__ ( self : List[Any] , _A : List[str] ): """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __SCREAMING_SNAKE_CASE : Dict = self.sp_model.PieceToId(_A ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def UpperCAmelCase__ ( self : Tuple , _A : Dict ): """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCAmelCase__ ( self : str , _A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = ''''''.join(_A ).replace(_A , ''' ''' ).strip() return out_string def UpperCAmelCase__ ( self : int , _A : str , _A : Optional[str] = None 
): """simple docstring""" if not os.path.isdir(_A ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return __SCREAMING_SNAKE_CASE : int = os.path.join( _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _A ) elif not os.path.isfile(self.vocab_file ): with open(_A , '''wb''' ) as fi: __SCREAMING_SNAKE_CASE : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(_A ) return (out_vocab_file,)
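# --- Illustrative usage sketch (not part of the original module) ---
# A quick look at the special-token layout the tokenizer above produces: for a single
# sequence the separator (</s>) is prepended rather than appended, and every
# SentencePiece id is shifted by the fairseq offset. The checkpoint name is taken
# from the pretrained map above; running this downloads the vocabulary and requires
# sentencepiece.
from transformers import XGLMTokenizer

tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
ids = tok("Hello world")["input_ids"]
print(ids[0] == tok.sep_token_id)       # True - </s> leads, nothing is appended
print(tok.convert_ids_to_tokens(ids))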
74
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowercase_ = { """configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""", """FalconForCausalLM""", """FalconModel""", """FalconPreTrainedModel""", """FalconForSequenceClassification""", """FalconForTokenClassification""", """FalconForQuestionAnswering""", ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
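# --- Illustrative sketch (not part of the original module) ---
# Because of the _LazyModule indirection above, importing the falcon subpackage is
# cheap: the torch-backed model classes are only materialised when first accessed.
# The config, listed in the always-available part of the import structure, can be
# built without touching them (import path assumes the public transformers API):
from transformers import FalconConfig

cfg = FalconConfig(hidden_size=256, num_hidden_layers=2, num_attention_heads=4)
print(cfg.model_type)  # "falcon"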
74
1
import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available lowercase_ = logging.getLogger(__name__) @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 lowerCAmelCase_ = None lowerCAmelCase_ = None class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''train''' lowerCAmelCase_ = '''dev''' lowerCAmelCase_ = '''test''' class __UpperCamelCase : """simple docstring""" @staticmethod def UpperCAmelCase__ ( _A : int , _A : Union[Split, str] ): """simple docstring""" raise NotImplementedError @staticmethod def UpperCAmelCase__ ( _A : str ): """simple docstring""" raise NotImplementedError @staticmethod def UpperCAmelCase__ ( _A : List[InputExample] , _A : List[str] , _A : int , _A : PreTrainedTokenizer , _A : Tuple=False , _A : List[str]="[CLS]" , _A : Tuple=1 , _A : str="[SEP]" , _A : Dict=False , _A : int=False , _A : Union[str, Any]=0 , _A : str=0 , _A : Optional[Any]=-100 , _A : int=0 , _A : Any=True , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = {label: i for i, label in enumerate(_A )} __SCREAMING_SNAKE_CASE : Dict = [] for ex_index, example in enumerate(_A ): if ex_index % 1_0000 == 0: logger.info('''Writing example %d of %d''' , _A , len(_A ) ) __SCREAMING_SNAKE_CASE : str = [] __SCREAMING_SNAKE_CASE : Tuple = [] for word, label in zip(example.words , example.labels ): __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize(_A ) # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space. if len(_A ) > 0: tokens.extend(_A ) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_A ) - 1) ) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. __SCREAMING_SNAKE_CASE : Tuple = tokenizer.num_special_tokens_to_add() if len(_A ) > max_seq_length - special_tokens_count: __SCREAMING_SNAKE_CASE : str = tokens[: (max_seq_length - special_tokens_count)] __SCREAMING_SNAKE_CASE : Union[str, Any] = label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. 
tokens += [sep_token] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] __SCREAMING_SNAKE_CASE : Any = [sequence_a_segment_id] * len(_A ) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: __SCREAMING_SNAKE_CASE : Optional[Any] = [cls_token] + tokens __SCREAMING_SNAKE_CASE : str = [pad_token_label_id] + label_ids __SCREAMING_SNAKE_CASE : int = [cls_token_segment_id] + segment_ids __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.convert_tokens_to_ids(_A ) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. __SCREAMING_SNAKE_CASE : Optional[Any] = [1 if mask_padding_with_zero else 0] * len(_A ) # Zero-pad up to the sequence length. __SCREAMING_SNAKE_CASE : List[Any] = max_seq_length - len(_A ) if pad_on_left: __SCREAMING_SNAKE_CASE : Union[str, Any] = ([pad_token] * padding_length) + input_ids __SCREAMING_SNAKE_CASE : int = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask __SCREAMING_SNAKE_CASE : Any = ([pad_token_segment_id] * padding_length) + segment_ids __SCREAMING_SNAKE_CASE : List[str] = ([pad_token_label_id] * padding_length) + label_ids else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length assert len(_A ) == max_seq_length assert len(_A ) == max_seq_length assert len(_A ) == max_seq_length assert len(_A ) == max_seq_length if ex_index < 5: logger.info('''*** Example ***''' ) logger.info('''guid: %s''' , example.guid ) logger.info('''tokens: %s''' , ''' '''.join([str(_A ) for x in tokens] ) ) logger.info('''input_ids: %s''' , ''' '''.join([str(_A ) for x in input_ids] ) ) logger.info('''input_mask: %s''' , ''' '''.join([str(_A ) for x in input_mask] ) ) logger.info('''segment_ids: %s''' , ''' '''.join([str(_A ) for x in segment_ids] ) ) logger.info('''label_ids: %s''' , ''' '''.join([str(_A ) for x in label_ids] ) ) if "token_type_ids" not in tokenizer.model_input_names: __SCREAMING_SNAKE_CASE : List[Any] = None features.append( InputFeatures( input_ids=_A , attention_mask=_A , token_type_ids=_A , label_ids=_A ) ) return features if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = 42 lowerCAmelCase_ = nn.CrossEntropyLoss().ignore_index def __init__( self : List[Any] , _A : TokenClassificationTask , _A : str , _A : PreTrainedTokenizer , _A : List[str] , _A : str , _A : Optional[int] = None , _A : Optional[int]=False , _A : Split = Split.train , ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = os.path.join( _A , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(_A ) ) , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
__SCREAMING_SNAKE_CASE : str = cached_features_file + '''.lock''' with FileLock(_A ): if os.path.exists(_A ) and not overwrite_cache: logger.info(F'''Loading features from cached file {cached_features_file}''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(_A ) else: logger.info(F'''Creating features from dataset file at {data_dir}''' ) __SCREAMING_SNAKE_CASE : Dict = token_classification_task.read_examples_from_file(_A , _A ) # TODO clean up all this to leverage built-in features of tokenizers __SCREAMING_SNAKE_CASE : Dict = token_classification_task.convert_examples_to_features( _A , _A , _A , _A , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_A , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info(F'''Saving features into cached file {cached_features_file}''' ) torch.save(self.features , _A ) def __len__( self : Union[str, Any] ): """simple docstring""" return len(self.features ) def __getitem__( self : Union[str, Any] , _A : Dict ): """simple docstring""" return self.features[i] if is_tf_available(): import tensorflow as tf class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = 42 lowerCAmelCase_ = -1_00 def __init__( self : Dict , _A : TokenClassificationTask , _A : str , _A : PreTrainedTokenizer , _A : List[str] , _A : str , _A : Optional[int] = None , _A : List[Any]=False , _A : Split = Split.train , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = token_classification_task.read_examples_from_file(_A , _A ) # TODO clean up all this to leverage built-in features of tokenizers __SCREAMING_SNAKE_CASE : List[str] = token_classification_task.convert_examples_to_features( _A , _A , _A , _A , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_A , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) def gen(): for ex in self.features: if ex.token_type_ids is None: yield ( {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label_ids, ) else: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label_ids, ) if "token_type_ids" not in tokenizer.model_input_names: __SCREAMING_SNAKE_CASE : int = tf.data.Dataset.from_generator( _A , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , ( {'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )}, tf.TensorShape([None] ), ) , ) else: __SCREAMING_SNAKE_CASE : Union[str, Any] = tf.data.Dataset.from_generator( _A , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , ( { '''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] ), '''token_type_ids''': tf.TensorShape([None] ), }, tf.TensorShape([None] ), ) , ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) ) return self.dataset def __len__( self : 
Union[str, Any] ): """simple docstring""" return len(self.features ) def __getitem__( self : Union[str, Any] , _A : Dict ): """simple docstring""" return self.features[i]
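# --- Illustrative sketch (not part of the original module) ---
# The heart of the feature conversion above: only the first word-piece of every word
# keeps its real label id, the remaining pieces receive the ignore index
# (nn.CrossEntropyLoss().ignore_index == -100) so the loss skips them. A standalone
# miniature of that labelling step; the tokenizer checkpoint and the tag set are
# illustrative choices, not taken from the original file.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
words, labels = ["Hugging", "Face", "Inc"], ["B-ORG", "I-ORG", "I-ORG"]
label_map = {"O": 0, "B-ORG": 1, "I-ORG": 2}
pad_token_label_id = -100

tokens, label_ids = [], []
for word, label in zip(words, labels):
    pieces = tokenizer.tokenize(word)
    tokens.extend(pieces)
    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(pieces) - 1))

print(list(zip(tokens, label_ids)))  # sub-tokens after the first carry -100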
74
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging lowercase_ = logging.get_logger(__name__) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = set() __SCREAMING_SNAKE_CASE : str = [] def parse_line(snake_case ): for line in fp: if isinstance(snake_case , snake_case ): __SCREAMING_SNAKE_CASE : List[Any] = line.decode('''UTF-8''' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(''' ''' ): # process a single warning and move it to `selected_warnings`. if len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : List[Any] = '''\n'''.join(snake_case ) # Only keep the warnings specified in `targets` if any(F''': {x}: ''' in warning for x in targets ): selected_warnings.add(snake_case ) buffer.clear() continue else: __SCREAMING_SNAKE_CASE : int = line.strip() buffer.append(snake_case ) if from_gh: for filename in os.listdir(snake_case ): __SCREAMING_SNAKE_CASE : Any = os.path.join(snake_case , snake_case ) if not os.path.isdir(snake_case ): # read the file if filename != "warnings.txt": continue with open(snake_case ) as fp: parse_line(snake_case ) else: try: with zipfile.ZipFile(snake_case ) as z: for filename in z.namelist(): if not os.path.isdir(snake_case ): # read the file if filename != "warnings.txt": continue with z.open(snake_case ) as fp: parse_line(snake_case ) except Exception: logger.warning( F'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' ) return selected_warnings def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = set() __SCREAMING_SNAKE_CASE : List[Any] = [os.path.join(snake_case , snake_case ) for p in os.listdir(snake_case ) if (p.endswith('''.zip''' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(snake_case , snake_case ) ) return selected_warnings if __name__ == "__main__": def a__ ( snake_case ): """simple docstring""" return values.split(''',''' ) lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") # optional parameters parser.add_argument( """--targets""", default="""DeprecationWarning,UserWarning,FutureWarning""", type=list_str, help="""Comma-separated list of target warning(s) which we want to extract.""", ) parser.add_argument( """--from_gh""", action="""store_true""", help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""", ) lowercase_ = parser.parse_args() lowercase_ = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links lowercase_ = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("""=""" * 
80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts lowercase_ = extract_warnings(args.output_dir, args.targets) lowercase_ = sorted(selected_warnings) with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
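For quick reference, a minimal, self-contained sketch of the selection rule the parser above applies: a collected warning block is kept only when it contains ": <Target>: " for one of the requested targets. The sample lines and target list below are invented for illustration; real input comes from the pytest "warnings summary" section stored in a warnings.txt artifact.

targets = ["DeprecationWarning", "UserWarning", "FutureWarning"]
warning_blocks = [
    "src/example.py:10: DeprecationWarning: this API will be removed",  # kept
    "src/other.py:42: RuntimeWarning: overflow encountered in exp",     # dropped
]
selected_warnings = {w for w in warning_blocks if any(f": {t}: " in w for t in targets)}
print(sorted(selected_warnings))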
74
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowercase_ = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""", """SwinForImageClassification""", """SwinForMaskedImageModeling""", """SwinModel""", """SwinPreTrainedModel""", """SwinBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFSwinForImageClassification""", """TFSwinForMaskedImageModeling""", """TFSwinModel""", """TFSwinPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swin import ( SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel, SwinPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = 42 class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" @register_to_config def __init__( self : Dict , _A : int = 16 , _A : int = 88 , _A : Optional[int] = None , _A : Optional[int] = None , _A : int = 1 , _A : float = 0.0 , _A : int = 32 , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : str = "geglu" , _A : bool = True , _A : bool = True , ): """simple docstring""" super().__init__() __SCREAMING_SNAKE_CASE : Dict = num_attention_heads __SCREAMING_SNAKE_CASE : Optional[int] = attention_head_dim __SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads * attention_head_dim __SCREAMING_SNAKE_CASE : Tuple = in_channels __SCREAMING_SNAKE_CASE : str = torch.nn.GroupNorm(num_groups=_A , num_channels=_A , eps=1e-6 , affine=_A ) __SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(_A , _A ) # 3. Define transformers blocks __SCREAMING_SNAKE_CASE : List[Any] = nn.ModuleList( [ BasicTransformerBlock( _A , _A , _A , dropout=_A , cross_attention_dim=_A , activation_fn=_A , attention_bias=_A , double_self_attention=_A , norm_elementwise_affine=_A , ) for d in range(_A ) ] ) __SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(_A , _A ) def UpperCAmelCase__ ( self : str , _A : Dict , _A : int=None , _A : Tuple=None , _A : Dict=None , _A : List[Any]=1 , _A : Union[str, Any]=None , _A : bool = True , ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = hidden_states.shape __SCREAMING_SNAKE_CASE : Any = batch_frames // num_frames __SCREAMING_SNAKE_CASE : Dict = hidden_states __SCREAMING_SNAKE_CASE : str = hidden_states[None, :].reshape(_A , _A , _A , _A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = hidden_states.permute(0 , 2 , 1 , 3 , 4 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self.norm(_A ) __SCREAMING_SNAKE_CASE : List[str] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , _A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = self.proj_in(_A ) # 2. Blocks for block in self.transformer_blocks: __SCREAMING_SNAKE_CASE : Optional[Any] = block( _A , encoder_hidden_states=_A , timestep=_A , cross_attention_kwargs=_A , class_labels=_A , ) # 3. Output __SCREAMING_SNAKE_CASE : Any = self.proj_out(_A ) __SCREAMING_SNAKE_CASE : List[str] = ( hidden_states[None, None, :] .reshape(_A , _A , _A , _A , _A ) .permute(0 , 3 , 4 , 1 , 2 ) .contiguous() ) __SCREAMING_SNAKE_CASE : Optional[Any] = hidden_states.reshape(_A , _A , _A , _A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=_A )
74
1
def a__(input_num):
    """Return the sum of the proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError('''Input must be an integer''')
    if input_num <= 0:
        raise ValueError('''Input must be positive''')
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
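As a quick usage sketch of the helper above: 28 is a perfect number, so the sum of its proper divisors equals the number itself (the value is chosen only for illustration).

assert a__(28) == 1 + 2 + 4 + 7 + 14 == 28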
74
import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py lowercase_ = """src/diffusers""" lowercase_ = """.""" # This is to make sure the diffusers module imported is the one in the repo. lowercase_ = importlib.util.spec_from_file_location( """diffusers""", os.path.join(DIFFUSERS_PATH, """__init__.py"""), submodule_search_locations=[DIFFUSERS_PATH], ) lowercase_ = spec.loader.load_module() def a__ ( snake_case , snake_case ): """simple docstring""" return line.startswith(snake_case ) or len(snake_case ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , snake_case ) is not None def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = object_name.split('''.''' ) __SCREAMING_SNAKE_CASE : str = 0 # First let's find the module where our object lives. __SCREAMING_SNAKE_CASE : Any = parts[i] while i < len(snake_case ) and not os.path.isfile(os.path.join(snake_case , F'''{module}.py''' ) ): i += 1 if i < len(snake_case ): __SCREAMING_SNAKE_CASE : str = os.path.join(snake_case , parts[i] ) if i >= len(snake_case ): raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' ) with open(os.path.join(snake_case , F'''{module}.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : Dict = f.readlines() # Now let's find the class / func in the code! __SCREAMING_SNAKE_CASE : Union[str, Any] = '''''' __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for name in parts[i + 1 :]: while ( line_index < len(snake_case ) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(snake_case ): raise ValueError(F''' {object_name} does not match any function or class in {module}.''' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). __SCREAMING_SNAKE_CASE : List[Any] = line_index while line_index < len(snake_case ) and _should_continue(lines[line_index] , snake_case ): line_index += 1 # Clean up empty lines at the end (if any). 
while len(lines[line_index - 1] ) <= 1: line_index -= 1 __SCREAMING_SNAKE_CASE : Dict = lines[start_index:line_index] return "".join(snake_case ) lowercase_ = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""") lowercase_ = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""") lowercase_ = re.compile(R"""<FILL\s+[^>]*>""") def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = code.split('''\n''' ) __SCREAMING_SNAKE_CASE : Dict = 0 while idx < len(snake_case ) and len(lines[idx] ) == 0: idx += 1 if idx < len(snake_case ): return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0] return "" def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = len(get_indent(snake_case ) ) > 0 if has_indent: __SCREAMING_SNAKE_CASE : List[Any] = F'''class Bla:\n{code}''' __SCREAMING_SNAKE_CASE : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=snake_case ) __SCREAMING_SNAKE_CASE : Optional[int] = black.format_str(snake_case , mode=snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = style_docstrings_in_code(snake_case ) return result[len('''class Bla:\n''' ) :] if has_indent else result def a__ ( snake_case , snake_case=False ): """simple docstring""" with open(snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : List[str] = f.readlines() __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : int = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(snake_case ): __SCREAMING_SNAKE_CASE : Dict = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = search.groups() __SCREAMING_SNAKE_CASE : int = find_code_in_diffusers(snake_case ) __SCREAMING_SNAKE_CASE : str = get_indent(snake_case ) __SCREAMING_SNAKE_CASE : Any = line_index + 1 if indent == theoretical_indent else line_index + 2 __SCREAMING_SNAKE_CASE : Dict = theoretical_indent __SCREAMING_SNAKE_CASE : Optional[int] = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. __SCREAMING_SNAKE_CASE : List[Any] = True while line_index < len(snake_case ) and should_continue: line_index += 1 if line_index >= len(snake_case ): break __SCREAMING_SNAKE_CASE : Any = lines[line_index] __SCREAMING_SNAKE_CASE : Optional[Any] = _should_continue(snake_case , snake_case ) and re.search(F'''^{indent}# End copy''' , snake_case ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 __SCREAMING_SNAKE_CASE : List[str] = lines[start_index:line_index] __SCREAMING_SNAKE_CASE : Dict = ''''''.join(snake_case ) # Remove any nested `Copied from` comments to avoid circular copies __SCREAMING_SNAKE_CASE : Tuple = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(snake_case ) is None] __SCREAMING_SNAKE_CASE : Union[str, Any] = '''\n'''.join(snake_case ) # Before comparing, use the `replace_pattern` on the original code. 
if len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : Union[str, Any] = replace_pattern.replace('''with''' , '''''' ).split(''',''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = [_re_replace_pattern.search(snake_case ) for p in patterns] for pattern in patterns: if pattern is None: continue __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = pattern.groups() __SCREAMING_SNAKE_CASE : str = re.sub(snake_case , snake_case , snake_case ) if option.strip() == "all-casing": __SCREAMING_SNAKE_CASE : Optional[Any] = re.sub(obja.lower() , obja.lower() , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = re.sub(obja.upper() , obja.upper() , snake_case ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line __SCREAMING_SNAKE_CASE : Optional[Any] = blackify(lines[start_index - 1] + theoretical_code ) __SCREAMING_SNAKE_CASE : int = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: __SCREAMING_SNAKE_CASE : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:] __SCREAMING_SNAKE_CASE : str = start_index + 1 if overwrite and len(snake_case ) > 0: # Warn the user a file has been modified. print(F'''Detected changes, rewriting {filename}.''' ) with open(snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(snake_case ) return diffs def a__ ( snake_case = False ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = glob.glob(os.path.join(snake_case , '''**/*.py''' ) , recursive=snake_case ) __SCREAMING_SNAKE_CASE : Tuple = [] for filename in all_files: __SCREAMING_SNAKE_CASE : int = is_copy_consistent(snake_case , snake_case ) diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs] if not overwrite and len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : Optional[int] = '''\n'''.join(snake_case ) raise Exception( '''Found the following copy inconsistencies:\n''' + diff + '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") lowercase_ = parser.parse_args() check_copies(args.fix_and_overwrite)
74
1
import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() lowercase_ = logging.get_logger("""transformers.models.speecht5""") def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" hf_model.apply_weight_norm() __SCREAMING_SNAKE_CASE : Tuple = checkpoint['''input_conv.weight_g'''] __SCREAMING_SNAKE_CASE : Any = checkpoint['''input_conv.weight_v'''] __SCREAMING_SNAKE_CASE : Optional[int] = checkpoint['''input_conv.bias'''] for i in range(len(config.upsample_rates ) ): __SCREAMING_SNAKE_CASE : Any = checkpoint[F'''upsamples.{i}.1.weight_g'''] __SCREAMING_SNAKE_CASE : List[Any] = checkpoint[F'''upsamples.{i}.1.weight_v'''] __SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[F'''upsamples.{i}.1.bias'''] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): __SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g'''] __SCREAMING_SNAKE_CASE : str = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v'''] __SCREAMING_SNAKE_CASE : int = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias'''] __SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g'''] __SCREAMING_SNAKE_CASE : Dict = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v'''] __SCREAMING_SNAKE_CASE : Dict = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias'''] __SCREAMING_SNAKE_CASE : List[Any] = checkpoint['''output_conv.1.weight_g'''] __SCREAMING_SNAKE_CASE : Tuple = checkpoint['''output_conv.1.weight_v'''] __SCREAMING_SNAKE_CASE : List[Any] = checkpoint['''output_conv.1.bias'''] hf_model.remove_weight_norm() @torch.no_grad() def a__ ( snake_case , snake_case , snake_case , snake_case=None , snake_case=None , ): """simple docstring""" if config_path is not None: __SCREAMING_SNAKE_CASE : Optional[Any] = SpeechTaHifiGanConfig.from_pretrained(snake_case ) else: __SCREAMING_SNAKE_CASE : Union[str, Any] = SpeechTaHifiGanConfig() __SCREAMING_SNAKE_CASE : Optional[Any] = SpeechTaHifiGan(snake_case ) __SCREAMING_SNAKE_CASE : str = torch.load(snake_case ) load_weights(orig_checkpoint['''model''']['''generator'''] , snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Any = np.load(snake_case ) __SCREAMING_SNAKE_CASE : List[Any] = stats[0].reshape(-1 ) __SCREAMING_SNAKE_CASE : Optional[Any] = stats[1].reshape(-1 ) __SCREAMING_SNAKE_CASE : Optional[int] = torch.from_numpy(snake_case ).float() __SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(snake_case ).float() model.save_pretrained(snake_case ) if repo_id: print('''Pushing to the hub...''' ) model.push_to_hub(snake_case ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""") parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) lowercase_ = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, 
args.push_to_hub, )
74
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" super().tearDown() gc.collect() def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained( '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE : int = jax.device_count() __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt] __SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE : Tuple = replicate(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = shard(_A ) __SCREAMING_SNAKE_CASE : Dict = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE : Optional[int] = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE : str = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE : Tuple = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = '''stabilityai/stable-diffusion-2''' __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = FlaxDPMSolverMultistepScheduler.from_pretrained(_A , subfolder='''scheduler''' ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = FlaxStableDiffusionPipeline.from_pretrained( _A , scheduler=_A , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE : List[str] = scheduler_params __SCREAMING_SNAKE_CASE : Tuple = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE : List[Any] = jax.device_count() __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt] __SCREAMING_SNAKE_CASE : Any = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = replicate(_A ) __SCREAMING_SNAKE_CASE : List[str] = shard(_A ) __SCREAMING_SNAKE_CASE : int = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE : Dict = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] ) print(F'''output_slice: {output_slice}''' ) 
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
74
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''naver-clova-ix/donut-base-finetuned-docvqa''' lowerCAmelCase_ = ( '''This is a tool that answers a question about an document (pdf). It takes an input named `document` which ''' '''should be the document containing the information, as well as a `question` that is the question about the ''' '''document. It returns a text that contains the answer to the question.''' ) lowerCAmelCase_ = '''document_qa''' lowerCAmelCase_ = AutoProcessor lowerCAmelCase_ = VisionEncoderDecoderModel lowerCAmelCase_ = ['''image''', '''text'''] lowerCAmelCase_ = ['''text'''] def __init__( self : Tuple , *_A : Tuple , **_A : Tuple ): """simple docstring""" if not is_vision_available(): raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' ) super().__init__(*_A , **_A ) def UpperCAmelCase__ ( self : List[Any] , _A : "Image" , _A : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>''' __SCREAMING_SNAKE_CASE : str = task_prompt.replace('''{user_input}''' , _A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self.pre_processor.tokenizer( _A , add_special_tokens=_A , return_tensors='''pt''' ).input_ids __SCREAMING_SNAKE_CASE : int = self.pre_processor(_A , return_tensors='''pt''' ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def UpperCAmelCase__ ( self : Dict , _A : Optional[Any] ): """simple docstring""" return self.model.generate( inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=_A , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=_A , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=_A , ).sequences def UpperCAmelCase__ ( self : Dict , _A : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.pre_processor.batch_decode(_A )[0] __SCREAMING_SNAKE_CASE : List[str] = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' ) __SCREAMING_SNAKE_CASE : int = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' ) __SCREAMING_SNAKE_CASE : List[Any] = re.sub(r'''<.*?>''' , '''''' , _A , count=1 ).strip() # remove first task start token __SCREAMING_SNAKE_CASE : Optional[int] = self.pre_processor.tokenajson(_A ) return sequence["answer"]
74
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) lowercase_ = { """configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""], """processing_layoutlmv2""": ["""LayoutLMv2Processor"""], """tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""LayoutLMv2TokenizerFast"""] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""LayoutLMv2FeatureExtractor"""] lowercase_ = ["""LayoutLMv2ImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""", """LayoutLMv2ForQuestionAnswering""", """LayoutLMv2ForSequenceClassification""", """LayoutLMv2ForTokenClassification""", """LayoutLMv2Layer""", """LayoutLMv2Model""", """LayoutLMv2PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
1
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : str , _A : List[str] , _A : str=7 , _A : Dict=3 , _A : Tuple=18 , _A : Optional[Any]=30 , _A : str=400 , _A : str=True , _A : Tuple=None , _A : Optional[int]=True , _A : Optional[Any]=None , _A : Optional[int]=True , _A : Dict=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , _A : List[str]=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , _A : Any=True , ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = size if size is not None else {'''height''': 224, '''width''': 224} __SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __SCREAMING_SNAKE_CASE : Optional[Any] = parent __SCREAMING_SNAKE_CASE : Optional[Any] = batch_size __SCREAMING_SNAKE_CASE : List[str] = num_channels __SCREAMING_SNAKE_CASE : Union[str, Any] = image_size __SCREAMING_SNAKE_CASE : int = min_resolution __SCREAMING_SNAKE_CASE : Tuple = max_resolution __SCREAMING_SNAKE_CASE : Any = do_resize __SCREAMING_SNAKE_CASE : Tuple = size __SCREAMING_SNAKE_CASE : Optional[Any] = do_center_crop __SCREAMING_SNAKE_CASE : List[str] = crop_size __SCREAMING_SNAKE_CASE : Union[str, Any] = do_normalize __SCREAMING_SNAKE_CASE : str = image_mean __SCREAMING_SNAKE_CASE : int = image_std __SCREAMING_SNAKE_CASE : Union[str, Any] = do_convert_rgb def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def UpperCAmelCase__ ( self : List[str] , _A : Optional[int]=False , _A : int=False , _A : int=False ): """simple docstring""" assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: __SCREAMING_SNAKE_CASE : Tuple = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) ) else: __SCREAMING_SNAKE_CASE : Any = [] for i in range(self.batch_size ): __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 ) image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension __SCREAMING_SNAKE_CASE : int = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs] if torchify: __SCREAMING_SNAKE_CASE : List[str] = [torch.from_numpy(_A ) for x in image_inputs] return image_inputs @require_torch @require_vision class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = ChineseCLIPImageProcessingTester(self , do_center_crop=_A ) @property def UpperCAmelCase__ ( self : Tuple 
): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) self.assertTrue(hasattr(_A , '''do_center_crop''' ) ) self.assertTrue(hasattr(_A , '''center_crop''' ) ) self.assertTrue(hasattr(_A , '''do_normalize''' ) ) self.assertTrue(hasattr(_A , '''image_mean''' ) ) self.assertTrue(hasattr(_A , '''image_std''' ) ) self.assertTrue(hasattr(_A , '''do_convert_rgb''' ) ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 224, '''width''': 224} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def UpperCAmelCase__ ( self : int ): """simple docstring""" pass def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor_tester.prepare_inputs(equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE : str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor_tester.prepare_inputs(equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : int = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = 
self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE : str = self.image_processor_tester.prepare_inputs(equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) @require_torch @require_vision class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=_A ) __SCREAMING_SNAKE_CASE : int = 3 @property def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) self.assertTrue(hasattr(_A , '''do_center_crop''' ) ) self.assertTrue(hasattr(_A , '''center_crop''' ) ) self.assertTrue(hasattr(_A , '''do_normalize''' ) ) self.assertTrue(hasattr(_A , '''image_mean''' ) ) self.assertTrue(hasattr(_A , '''image_std''' ) ) self.assertTrue(hasattr(_A , '''do_convert_rgb''' ) ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" pass def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE : Tuple = self.image_processor_tester.prepare_inputs(equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
74
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = MobileBertTokenizer lowerCAmelCase_ = MobileBertTokenizerFast lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = filter_non_english lowerCAmelCase_ = '''google/mobilebert-uncased''' def UpperCAmelCase__ ( self : Dict ): """simple docstring""" super().setUp() __SCREAMING_SNAKE_CASE : List[str] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __SCREAMING_SNAKE_CASE : int = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : List[str] = '''unwanted, running''' return input_text, output_text def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class(self.vocab_file ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(_A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] ) def UpperCAmelCase__ ( self : int ): """simple docstring""" if not self.test_rust_tokenizer: return __SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : str = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : str = tokenizer.encode(_A ) __SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) # With lower casing __SCREAMING_SNAKE_CASE : Any = self.get_tokenizer(do_lower_case=_A ) __SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer(do_lower_case=_A ) __SCREAMING_SNAKE_CASE : List[str] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) 
__SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
[UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] __SCREAMING_SNAKE_CASE : Dict = {} for i, token in enumerate(_A ): __SCREAMING_SNAKE_CASE : List[str] = i __SCREAMING_SNAKE_CASE : str = WordpieceTokenizer(vocab=_A , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def UpperCAmelCase__ ( self : str ): """simple docstring""" self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) self.assertListEqual( [rust_tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) @slow def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : str = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r.encode_plus( _A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , 
add_special_tokens=_A , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.do_lower_case if hasattr(_A , '''do_lower_case''' ) else False __SCREAMING_SNAKE_CASE : Optional[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''A'''), ((1, 2), ''','''), ((3, 5), '''na'''), ((5, 6), '''##ï'''), ((6, 8), '''##ve'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''Allen'''), ((21, 23), '''##NL'''), ((23, 24), '''##P'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''a'''), ((1, 2), ''','''), ((3, 8), '''naive'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''allen'''), ((21, 23), '''##nl'''), ((23, 24), '''##p'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = ['''的''', '''人''', '''有'''] __SCREAMING_SNAKE_CASE : int = ''''''.join(_A ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __SCREAMING_SNAKE_CASE : str = True __SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.convert_ids_to_tokens(_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_A , _A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Optional[Any] = False __SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A ) # it is expected that only the first Chinese character is not preceded by "##". __SCREAMING_SNAKE_CASE : List[Any] = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(_A ) ] self.assertListEqual(_A , _A ) self.assertListEqual(_A , _A )
74
1
from __future__ import annotations


def a__(snake_case):
    """simple docstring"""
    return len(set(snake_case)) == len(snake_case)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
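A tiny usage sketch for the uniqueness check above (the sample lists are arbitrary):

assert a__([1, 2, 3]) is True      # all elements distinct
assert a__([1, 2, 2, 3]) is False  # 2 appears twice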
74
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


lowercase_ = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    """Deprecated alias kept for backwards compatibility; use MobileViTImageProcessor instead."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use MobileViTImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
74
1
import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowercase_ = { """text_branch""": """text_model""", """audio_branch""": """audio_model.audio_encoder""", """attn""": """attention.self""", """self.proj""": """output.dense""", """attention.self_mask""": """attn_mask""", """mlp.fc1""": """intermediate.dense""", """mlp.fc2""": """output.dense""", """norm1""": """layernorm_before""", """norm2""": """layernorm_after""", """bn0""": """batch_norm""", } lowercase_ = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""") def a__ ( snake_case , snake_case=False ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = create_model( '''HTSAT-tiny''' , '''roberta''' , snake_case , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=snake_case , fusion_type='''aff_2d''' if enable_fusion else None , ) return model, model_cfg def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = {} __SCREAMING_SNAKE_CASE : List[str] = R'''.*sequential.(\d+).*''' __SCREAMING_SNAKE_CASE : int = R'''.*_projection.(\d+).*''' for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: __SCREAMING_SNAKE_CASE : List[Any] = key.replace(snake_case , snake_case ) if re.match(snake_case , snake_case ): # replace sequential layers with list __SCREAMING_SNAKE_CASE : str = re.match(snake_case , snake_case ).group(1 ) __SCREAMING_SNAKE_CASE : Optional[int] = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(snake_case )//3}.linear.''' ) elif re.match(snake_case , snake_case ): __SCREAMING_SNAKE_CASE : Optional[Any] = int(re.match(snake_case , snake_case ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... 
__SCREAMING_SNAKE_CASE : Optional[Any] = 1 if projecton_layer == 0 else 2 __SCREAMING_SNAKE_CASE : List[Any] = key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' ) if "audio" and "qkv" in key: # split qkv into query key and value __SCREAMING_SNAKE_CASE : Optional[int] = value __SCREAMING_SNAKE_CASE : Union[str, Any] = mixed_qkv.size(0 ) // 3 __SCREAMING_SNAKE_CASE : Optional[Any] = mixed_qkv[:qkv_dim] __SCREAMING_SNAKE_CASE : Tuple = mixed_qkv[qkv_dim : qkv_dim * 2] __SCREAMING_SNAKE_CASE : Union[str, Any] = mixed_qkv[qkv_dim * 2 :] __SCREAMING_SNAKE_CASE : List[Any] = query_layer __SCREAMING_SNAKE_CASE : Optional[Any] = key_layer __SCREAMING_SNAKE_CASE : Union[str, Any] = value_layer else: __SCREAMING_SNAKE_CASE : List[str] = value return model_state_dict def a__ ( snake_case , snake_case , snake_case , snake_case=False ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = init_clap(snake_case , enable_fusion=snake_case ) clap_model.eval() __SCREAMING_SNAKE_CASE : List[str] = clap_model.state_dict() __SCREAMING_SNAKE_CASE : Optional[int] = rename_state_dict(snake_case ) __SCREAMING_SNAKE_CASE : Dict = ClapConfig() __SCREAMING_SNAKE_CASE : Any = enable_fusion __SCREAMING_SNAKE_CASE : Optional[int] = ClapModel(snake_case ) # ignore the spectrogram embedding layer model.load_state_dict(snake_case , strict=snake_case ) model.save_pretrained(snake_case ) transformers_config.save_pretrained(snake_case ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""") lowercase_ = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
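A minimal sketch of the qkv-splitting step performed in rename_state_dict above, using a toy tensor in place of a real CLAP checkpoint weight (the shapes are illustrative only):

import torch

mixed_qkv = torch.randn(3 * 8, 8)  # fused projection: first dimension is 3 * per-head dim
qkv_dim = mixed_qkv.size(0) // 3
query_layer = mixed_qkv[:qkv_dim]
key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
value_layer = mixed_qkv[qkv_dim * 2 :]
assert query_layer.shape == key_layer.shape == value_layer.shape == (8, 8)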
74
import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast lowercase_ = datasets.utils.logging.get_logger(__name__) @dataclass class __UpperCamelCase ( datasets.BuilderConfig ): """simple docstring""" lowerCAmelCase_ = 1_00_00 lowerCAmelCase_ = None lowerCAmelCase_ = None class __UpperCamelCase ( datasets.ArrowBasedBuilder ): """simple docstring""" lowerCAmelCase_ = ParquetConfig def UpperCAmelCase__ ( self : Any ): """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def UpperCAmelCase__ ( self : Any , _A : Optional[Any] ): """simple docstring""" if not self.config.data_files: raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) __SCREAMING_SNAKE_CASE : List[str] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_A , (str, list, tuple) ): __SCREAMING_SNAKE_CASE : Tuple = data_files if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Optional[int] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE : List[Any] = [dl_manager.iter_files(_A ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] __SCREAMING_SNAKE_CASE : int = [] for split_name, files in data_files.items(): if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Any = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE : Optional[int] = [dl_manager.iter_files(_A ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(_A ): with open(_A , '''rb''' ) as f: __SCREAMING_SNAKE_CASE : Dict = datasets.Features.from_arrow_schema(pq.read_schema(_A ) ) break splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'''files''': files} ) ) return splits def UpperCAmelCase__ ( self : str , _A : pa.Table ): """simple docstring""" if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example __SCREAMING_SNAKE_CASE : str = table_cast(_A , self.info.features.arrow_schema ) return pa_table def UpperCAmelCase__ ( self : Tuple , _A : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' ) for file_idx, file in enumerate(itertools.chain.from_iterable(_A ) ): with open(_A , '''rb''' ) as f: __SCREAMING_SNAKE_CASE : str = pq.ParquetFile(_A ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): __SCREAMING_SNAKE_CASE : Optional[Any] = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield F'''{file_idx}_{batch_idx}''', self._cast_table(_A ) except ValueError as 
e: logger.error(F'''Failed to read file \'{file}\' with error {type(_A )}: {e}''' ) raise
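The Arrow-based Parquet builder above is normally exercised through datasets.load_dataset rather than instantiated directly. A minimal sketch of that entry point, assuming a local file data/train.parquet exists (the path is illustrative, not taken from the source):

from datasets import load_dataset

# The packaged "parquet" builder reads each file in batches of `batch_size` rows
# (10_000 by default in the config above) and yields them as Arrow tables.
ds = load_dataset("parquet", data_files={"train": "data/train.parquet"}, split="train")
print(ds.features)  # inferred from the Parquet/Arrow schema when no features are passed
print(ds[0])        # first row, decoded to a plain Python dict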
74
1
def a__ ( snake_case , snake_case , snake_case = 0 , snake_case = 0 ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = right or len(snake_case ) - 1 if left > right: return -1 elif list_data[left] == key: return left elif list_data[right] == key: return right else: return search(snake_case , snake_case , left + 1 , right - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
74
from math import isclose, sqrt def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = point_y / 4 / point_x __SCREAMING_SNAKE_CASE : int = 2 * normal_gradient / (1 + normal_gradient * normal_gradient) __SCREAMING_SNAKE_CASE : Tuple = (1 - normal_gradient * normal_gradient) / ( 1 + normal_gradient * normal_gradient ) __SCREAMING_SNAKE_CASE : int = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient) # to find the next point, solve the simultaneous equations: # y^2 + 4x^2 = 100 # y - b = m * (x - a) # ==> A x^2 + B x + C = 0 __SCREAMING_SNAKE_CASE : int = outgoing_gradient**2 + 4 __SCREAMING_SNAKE_CASE : List[str] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x) __SCREAMING_SNAKE_CASE : Optional[Any] = (point_y - outgoing_gradient * point_x) ** 2 - 100 __SCREAMING_SNAKE_CASE : str = ( -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) __SCREAMING_SNAKE_CASE : int = ( -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) # two solutions, one of which is our input point __SCREAMING_SNAKE_CASE : Dict = x_minus if isclose(snake_case , snake_case ) else x_plus __SCREAMING_SNAKE_CASE : Dict = point_y + outgoing_gradient * (next_x - point_x) return next_x, next_y, outgoing_gradient def a__ ( snake_case = 1.4 , snake_case = -9.6 ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : float = first_x_coord __SCREAMING_SNAKE_CASE : float = first_y_coord __SCREAMING_SNAKE_CASE : float = (10.1 - point_y) / (0.0 - point_x) while not (-0.01 <= point_x <= 0.01 and point_y > 0): __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = next_point(snake_case , snake_case , snake_case ) num_reflections += 1 return num_reflections if __name__ == "__main__": print(f'''{solution() = }''')
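In the routine above, point_y / 4 / point_x is the slope of the normal to the ellipse y^2 + 4x^2 = 100 at (x, y); the two intermediate terms 2n/(1 + n^2) and (1 - n^2)/(1 + n^2) are sin(2*theta) and cos(2*theta) for tan(theta) = n, so the outgoing slope is tan(2*theta - phi), i.e. the incoming direction reflected about the normal. A quick standalone check of that identity, with illustrative names not taken from the source: reflecting twice about the same normal must return the original gradient.

from math import isclose

def reflect_gradient(incoming: float, normal: float) -> float:
    # sin(2*theta) and cos(2*theta) for tan(theta) = normal, then tan(2*theta - phi)
    s2 = 2 * normal / (1 + normal * normal)
    c2 = (1 - normal * normal) / (1 + normal * normal)
    return (s2 - c2 * incoming) / (c2 + s2 * incoming)

m, n = 1.75, 0.4  # arbitrary incoming gradient and normal gradient
assert isclose(reflect_gradient(reflect_gradient(m, n), n), m)  # reflection is an involution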
74
1
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" super().tearDown() gc.collect() def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained( '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE : int = jax.device_count() __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt] __SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE : Tuple = replicate(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = shard(_A ) __SCREAMING_SNAKE_CASE : Dict = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE : Optional[int] = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE : str = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE : Tuple = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = '''stabilityai/stable-diffusion-2''' __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = FlaxDPMSolverMultistepScheduler.from_pretrained(_A , subfolder='''scheduler''' ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = FlaxStableDiffusionPipeline.from_pretrained( _A , scheduler=_A , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE : List[str] = scheduler_params __SCREAMING_SNAKE_CASE : Tuple = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE : List[Any] = jax.device_count() __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt] __SCREAMING_SNAKE_CASE : Any = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = replicate(_A ) __SCREAMING_SNAKE_CASE : List[str] = shard(_A ) __SCREAMING_SNAKE_CASE : int = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE : Dict = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] ) print(F'''output_slice: {output_slice}''' ) 
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
74
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Any , _A : int , _A : Any=7 , _A : List[str]=3 , _A : Optional[Any]=18 , _A : List[str]=30 , _A : Optional[Any]=400 , _A : Any=True , _A : List[str]=None , _A : Union[str, Any]=True , _A : Optional[int]=None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = size if size is not None else {'''shortest_edge''': 20} __SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __SCREAMING_SNAKE_CASE : int = parent __SCREAMING_SNAKE_CASE : Optional[int] = batch_size __SCREAMING_SNAKE_CASE : Optional[Any] = num_channels __SCREAMING_SNAKE_CASE : List[str] = image_size __SCREAMING_SNAKE_CASE : int = min_resolution __SCREAMING_SNAKE_CASE : Optional[int] = max_resolution __SCREAMING_SNAKE_CASE : List[Any] = do_resize __SCREAMING_SNAKE_CASE : Union[str, Any] = size __SCREAMING_SNAKE_CASE : str = do_center_crop __SCREAMING_SNAKE_CASE : Any = crop_size def UpperCAmelCase__ ( self : Dict ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = MobileNetVaImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = MobileNetVaImageProcessingTester(self ) @property def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) self.assertTrue(hasattr(_A , '''do_center_crop''' ) ) self.assertTrue(hasattr(_A , '''crop_size''' ) ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def UpperCAmelCase__ ( self : int ): """simple docstring""" pass def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] 
, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : Dict = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
74
1
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowercase_ = logging.get_logger(__name__) lowercase_ = { """ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""", } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''deta''' lowerCAmelCase_ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self : List[str] , _A : Dict=None , _A : Optional[int]=900 , _A : Dict=2048 , _A : int=6 , _A : Dict=2048 , _A : List[Any]=8 , _A : Union[str, Any]=6 , _A : Tuple=1024 , _A : Tuple=8 , _A : Dict=0.0 , _A : List[Any]=True , _A : List[Any]="relu" , _A : Tuple=256 , _A : Union[str, Any]=0.1 , _A : Optional[Any]=0.0 , _A : Dict=0.0 , _A : int=0.02 , _A : int=1.0 , _A : Optional[Any]=True , _A : Any=False , _A : Optional[int]="sine" , _A : Optional[int]=5 , _A : List[Any]=4 , _A : Tuple=4 , _A : Dict=True , _A : Tuple=300 , _A : Union[str, Any]=True , _A : Optional[Any]=True , _A : Optional[Any]=1 , _A : Dict=5 , _A : Optional[int]=2 , _A : Tuple=1 , _A : Union[str, Any]=1 , _A : Union[str, Any]=5 , _A : Tuple=2 , _A : str=0.1 , _A : Optional[int]=0.25 , **_A : Optional[Any] , ): """simple docstring""" if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) __SCREAMING_SNAKE_CASE : Dict = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] ) else: if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : int = backbone_config.pop('''model_type''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = CONFIG_MAPPING[backbone_model_type] __SCREAMING_SNAKE_CASE : Any = config_class.from_dict(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = backbone_config __SCREAMING_SNAKE_CASE : Tuple = num_queries __SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings __SCREAMING_SNAKE_CASE : Tuple = d_model __SCREAMING_SNAKE_CASE : Any = encoder_ffn_dim __SCREAMING_SNAKE_CASE : int = encoder_layers __SCREAMING_SNAKE_CASE : List[str] = encoder_attention_heads __SCREAMING_SNAKE_CASE : Tuple = decoder_ffn_dim __SCREAMING_SNAKE_CASE : str = decoder_layers __SCREAMING_SNAKE_CASE : Tuple = decoder_attention_heads __SCREAMING_SNAKE_CASE : Tuple = dropout __SCREAMING_SNAKE_CASE : int = attention_dropout __SCREAMING_SNAKE_CASE : Dict = activation_dropout __SCREAMING_SNAKE_CASE : str = activation_function __SCREAMING_SNAKE_CASE : List[str] = init_std __SCREAMING_SNAKE_CASE : Union[str, Any] = init_xavier_std __SCREAMING_SNAKE_CASE : Optional[Any] = encoder_layerdrop __SCREAMING_SNAKE_CASE : Tuple = auxiliary_loss __SCREAMING_SNAKE_CASE : Optional[int] = position_embedding_type # deformable attributes __SCREAMING_SNAKE_CASE : Dict = num_feature_levels __SCREAMING_SNAKE_CASE : Optional[int] = encoder_n_points __SCREAMING_SNAKE_CASE : Dict = decoder_n_points __SCREAMING_SNAKE_CASE : List[str] = two_stage __SCREAMING_SNAKE_CASE : Dict = two_stage_num_proposals __SCREAMING_SNAKE_CASE : List[Any] = with_box_refine __SCREAMING_SNAKE_CASE : Optional[Any] = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher __SCREAMING_SNAKE_CASE : Optional[int] = class_cost __SCREAMING_SNAKE_CASE : List[Any] = bbox_cost __SCREAMING_SNAKE_CASE : str = giou_cost # Loss coefficients __SCREAMING_SNAKE_CASE : Optional[int] = mask_loss_coefficient 
__SCREAMING_SNAKE_CASE : Tuple = dice_loss_coefficient __SCREAMING_SNAKE_CASE : int = bbox_loss_coefficient __SCREAMING_SNAKE_CASE : str = giou_loss_coefficient __SCREAMING_SNAKE_CASE : int = eos_coefficient __SCREAMING_SNAKE_CASE : List[str] = focal_alpha super().__init__(is_encoder_decoder=_A , **_A ) @property def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" return self.encoder_attention_heads @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self.d_model def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(self.__dict__ ) __SCREAMING_SNAKE_CASE : List[Any] = self.backbone_config.to_dict() __SCREAMING_SNAKE_CASE : Optional[Any] = self.__class__.model_type return output
74
def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = [0 for i in range(len(snake_case ) )] # initialize interval's left pointer and right pointer __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = 0, 0 for i in range(1 , len(snake_case ) ): # case when current index is inside the interval if i <= right_pointer: __SCREAMING_SNAKE_CASE : List[Any] = min(right_pointer - i + 1 , z_result[i - left_pointer] ) __SCREAMING_SNAKE_CASE : Dict = min_edge while go_next(snake_case , snake_case , snake_case ): z_result[i] += 1 # if the new index's result gives us a larger right interval, # we have to update left_pointer and right_pointer if i + z_result[i] - 1 > right_pointer: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = i, i + z_result[i] - 1 return z_result def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" return i + z_result[i] < len(snake_case ) and s[z_result[i]] == s[i + z_result[i]] def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = 0 # concatenate 'pattern' and 'input_str' and call z_function # with concatenated string __SCREAMING_SNAKE_CASE : str = z_function(pattern + input_str ) for val in z_result: # if the value is greater than the length of the pattern string # that means this index is the starting position of a substring # which is equal to the pattern string if val >= len(snake_case ): answer += 1 return answer if __name__ == "__main__": import doctest doctest.testmod()
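The three functions above compute a Z-array and count pattern occurrences by scanning the Z-values of pattern + text. A compact standalone sketch of the same idea (names here are illustrative, not from the source; a separator character assumed to occur in neither string is added so a Z-value cannot run across the pattern/text boundary):

def z_array(s: str) -> list:
    # z[i] = length of the longest common prefix of s and s[i:], with z[0] left as 0
    z = [0] * len(s)
    left = right = 0
    for i in range(1, len(s)):
        if i <= right:
            z[i] = min(right - i + 1, z[i - left])
        while i + z[i] < len(s) and s[z[i]] == s[i + z[i]]:
            z[i] += 1
        if i + z[i] - 1 > right:
            left, right = i, i + z[i] - 1
    return z

def count_occurrences(pattern: str, text: str) -> int:
    # every Z-value that reaches len(pattern) marks one exact match in the text
    return sum(v >= len(pattern) for v in z_array(pattern + "\x00" + text))

assert count_occurrences("aba", "abacaba") == 2  # "aba" starts at indices 0 and 4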
74
1
import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" @parameterized.expand([(None,), ('''foo.json''',)] ) def UpperCAmelCase__ ( self : List[Any] , _A : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = GenerationConfig( do_sample=_A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(_A , config_name=_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = GenerationConfig.from_pretrained(_A , config_name=_A ) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , _A ) self.assertEqual(loaded_config.temperature , 0.7 ) self.assertEqual(loaded_config.length_penalty , 1.0 ) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] ) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 50 ) self.assertEqual(loaded_config.max_length , 20 ) self.assertEqual(loaded_config.max_time , _A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained('''gpt2''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = GenerationConfig.from_model_config(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(_A , _A ) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id ) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = GenerationConfig() __SCREAMING_SNAKE_CASE : Any = { '''max_new_tokens''': 1024, '''foo''': '''bar''', } __SCREAMING_SNAKE_CASE : Union[str, Any] = copy.deepcopy(_A ) __SCREAMING_SNAKE_CASE : Any = generation_config.update(**_A ) # update_kwargs was not modified (no side effects) self.assertEqual(_A , _A ) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1024 ) # `.update()` returns a dictionary of unused kwargs self.assertEqual(_A , {'''foo''': '''bar'''} ) def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = GenerationConfig() __SCREAMING_SNAKE_CASE : str = '''bar''' with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir: generation_config.save_pretrained(_A ) __SCREAMING_SNAKE_CASE : Dict = GenerationConfig.from_pretrained(_A ) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , '''bar''' ) __SCREAMING_SNAKE_CASE : List[Any] = GenerationConfig.from_model_config(_A ) assert not hasattr(_A , '''foo''' ) # no new kwargs should be initialized if from config def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = GenerationConfig() self.assertEqual(default_config.temperature , 1.0 ) self.assertEqual(default_config.do_sample , _A ) self.assertEqual(default_config.num_beams , 1 ) __SCREAMING_SNAKE_CASE : Optional[int] = GenerationConfig( do_sample=_A , 
temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) self.assertEqual(config.temperature , 0.7 ) self.assertEqual(config.do_sample , _A ) self.assertEqual(config.num_beams , 1 ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = GenerationConfig.from_pretrained(_A , temperature=1.0 ) self.assertEqual(loaded_config.temperature , 1.0 ) self.assertEqual(loaded_config.do_sample , _A ) self.assertEqual(loaded_config.num_beams , 1 ) # default value @is_staging_test class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" @classmethod def UpperCAmelCase__ ( cls : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = TOKEN HfFolder.save_token(_A ) @classmethod def UpperCAmelCase__ ( cls : List[str] ): """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-generation-config''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' ) except HTTPError: pass def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = GenerationConfig( do_sample=_A , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('''test-generation-config''' , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : Union[str, Any] = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_A , getattr(_A , _A ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-generation-config''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( _A , repo_id='''test-generation-config''' , push_to_hub=_A , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : Dict = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_A , getattr(_A , _A ) ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = GenerationConfig( do_sample=_A , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : str = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_A , getattr(_A , _A ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( _A , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=_A , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : Union[str, Any] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_A , getattr(_A , _A ) )
74
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowercase_ = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""", """SwinForImageClassification""", """SwinForMaskedImageModeling""", """SwinModel""", """SwinPreTrainedModel""", """SwinBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFSwinForImageClassification""", """TFSwinForMaskedImageModeling""", """TFSwinModel""", """TFSwinPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swin import ( SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel, SwinPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
1
from argparse import ArgumentParser from datasets.commands.convert import ConvertCommand from datasets.commands.dummy_data import DummyDataCommand from datasets.commands.env import EnvironmentCommand from datasets.commands.run_beam import RunBeamCommand from datasets.commands.test import TestCommand from datasets.utils.logging import set_verbosity_info def a__ ( snake_case ): """simple docstring""" return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )} def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = ArgumentParser( '''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=snake_case ) __SCREAMING_SNAKE_CASE : Tuple = parser.add_subparsers(help='''datasets-cli command helpers''' ) set_verbosity_info() # Register commands ConvertCommand.register_subcommand(snake_case ) EnvironmentCommand.register_subcommand(snake_case ) TestCommand.register_subcommand(snake_case ) RunBeamCommand.register_subcommand(snake_case ) DummyDataCommand.register_subcommand(snake_case ) # Parse args __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = parser.parse_known_args() if not hasattr(snake_case , '''func''' ): parser.print_help() exit(1 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = parse_unknown_args(snake_case ) # Run __SCREAMING_SNAKE_CASE : List[str] = args.func(snake_case , **snake_case ) service.run() if __name__ == "__main__": main()
74
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = XCLIPTextConfig() # derive patch size from model name __SCREAMING_SNAKE_CASE : Tuple = model_name.find('''patch''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPVisionConfig(patch_size=snake_case , num_frames=snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 __SCREAMING_SNAKE_CASE : Optional[Any] = 12 __SCREAMING_SNAKE_CASE : Optional[Any] = 1_024 __SCREAMING_SNAKE_CASE : int = 4_096 __SCREAMING_SNAKE_CASE : Tuple = 16 __SCREAMING_SNAKE_CASE : Optional[int] = 24 __SCREAMING_SNAKE_CASE : Optional[int] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 if model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Any = 336 __SCREAMING_SNAKE_CASE : Any = XCLIPConfig.from_text_vision_configs(snake_case , snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Any = 768 return config def a__ ( snake_case ): """simple docstring""" # text encoder if name == "token_embedding.weight": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' ) if name == "positional_embedding": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' ) if "ln_1" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''ln_1''' , '''layer_norm1''' ) if "ln_2" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''ln_2''' , '''layer_norm2''' ) if "c_fc" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''c_fc''' , '''fc1''' ) if "c_proj" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''c_proj''' , '''fc2''' ) if name.startswith('''transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : Any = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' ) if "attn.out_proj" in name and "message" not in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' ) if "ln_final" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''ln_final''' , '''text_model.final_layer_norm''' ) # visual encoder if name == "visual.class_embedding": __SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' ) if name == "visual.positional_embedding": __SCREAMING_SNAKE_CASE : Tuple = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' ) if name.startswith('''visual.transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : List[Any] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' ) if "visual.conv1" in name: __SCREAMING_SNAKE_CASE : Any = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' ) if "visual.ln_pre" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' ) if "visual.ln_post" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''visual.ln_post''' , 
'''vision_model.post_layernorm''' ) if "visual.proj" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''visual.proj''' , '''visual_projection.weight''' ) if "text_projection" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''text_projection''' , '''text_projection.weight''' ) # things on top if "prompts_visual_proj" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' ) if "prompts_visual_ln" in name: __SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' ) # mit if name == "mit.positional_embedding": __SCREAMING_SNAKE_CASE : Any = name.replace('''positional''' , '''position''' ) if name.startswith('''mit.resblocks''' ): __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' ) # prompts generator if name.startswith('''prompts_generator.norm''' ): __SCREAMING_SNAKE_CASE : Tuple = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' ) return name def a__ ( snake_case , snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): __SCREAMING_SNAKE_CASE : Tuple = orig_state_dict.pop(snake_case ) if "attn.in_proj" in key: __SCREAMING_SNAKE_CASE : Optional[Any] = key.split('''.''' ) if key.startswith('''visual''' ): __SCREAMING_SNAKE_CASE : List[Any] = key_split[3] __SCREAMING_SNAKE_CASE : Any = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __SCREAMING_SNAKE_CASE : Union[str, Any] = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Optional[Any] = val[ :dim ] __SCREAMING_SNAKE_CASE : Tuple = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim: ] else: if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : str = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Dict = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[-dim:] elif key.startswith('''mit''' ): __SCREAMING_SNAKE_CASE : List[str] = key_split[2] __SCREAMING_SNAKE_CASE : Union[str, Any] = config.vision_config.mit_hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : str = val[:dim, :] __SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :] __SCREAMING_SNAKE_CASE : Optional[int] = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Any = val[:dim] __SCREAMING_SNAKE_CASE : Any = val[dim : dim * 2] __SCREAMING_SNAKE_CASE : Optional[Any] = val[-dim:] else: __SCREAMING_SNAKE_CASE : Optional[Any] = key_split[2] __SCREAMING_SNAKE_CASE : Any = config.text_config.hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[:dim, :] __SCREAMING_SNAKE_CASE : int = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Dict = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Tuple = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : int = val[-dim:] else: __SCREAMING_SNAKE_CASE : int = rename_key(snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __SCREAMING_SNAKE_CASE : int = val.T __SCREAMING_SNAKE_CASE : Union[str, Any] = val return orig_state_dict def a__ ( snake_case ): """simple docstring""" if num_frames == 8: __SCREAMING_SNAKE_CASE : List[Any] = '''eating_spaghetti_8_frames.npy''' elif num_frames == 16: __SCREAMING_SNAKE_CASE : Tuple = '''eating_spaghetti.npy''' 
elif num_frames == 32: __SCREAMING_SNAKE_CASE : Dict = '''eating_spaghetti_32_frames.npy''' __SCREAMING_SNAKE_CASE : List[str] = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename=snake_case , repo_type='''dataset''' , ) __SCREAMING_SNAKE_CASE : int = np.load(snake_case ) return list(snake_case ) def a__ ( snake_case , snake_case=None , snake_case=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = { # fully supervised kinetics-400 checkpoints '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''', '''xclip-base-patch32-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth''' ), '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''', '''xclip-base-patch16-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth''' ), '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb''', '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f''', # fully supervised kinetics-600 checkpoints '''xclip-base-patch16-kinetics-600''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth''' ), '''xclip-base-patch16-kinetics-600-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth''' ), '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be''', # few shot '''xclip-base-patch16-hmdb-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth''' ), '''xclip-base-patch16-hmdb-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth''' ), '''xclip-base-patch16-hmdb-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth''' ), '''xclip-base-patch16-hmdb-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth''' ), '''xclip-base-patch16-ucf-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth''' ), '''xclip-base-patch16-ucf-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth''' ), '''xclip-base-patch16-ucf-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth''' ), '''xclip-base-patch16-ucf-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth''' ), # zero shot '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''', } __SCREAMING_SNAKE_CASE : Optional[Any] = model_to_url[model_name] __SCREAMING_SNAKE_CASE : Any = 8 if "16-frames" in model_name: __SCREAMING_SNAKE_CASE : Optional[int] = 16 elif "shot" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 32 __SCREAMING_SNAKE_CASE : List[str] = get_xclip_config(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPModel(snake_case ) model.eval() if "drive" in checkpoint_url: __SCREAMING_SNAKE_CASE : Union[str, Any] = '''pytorch_model.bin''' gdown.cached_download(snake_case , 
snake_case , quiet=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(snake_case , map_location='''cpu''' )['''model'''] else: __SCREAMING_SNAKE_CASE : str = torch.hub.load_state_dict_from_url(snake_case )['''model'''] __SCREAMING_SNAKE_CASE : List[Any] = convert_state_dict(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = XCLIPModel(snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = model.load_state_dict(snake_case , strict=snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() __SCREAMING_SNAKE_CASE : Any = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224 __SCREAMING_SNAKE_CASE : str = VideoMAEImageProcessor(size=snake_case ) __SCREAMING_SNAKE_CASE : int = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : Optional[int] = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : List[Any] = XCLIPProcessor(image_processor=snake_case , tokenizer=snake_case ) __SCREAMING_SNAKE_CASE : Dict = prepare_video(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = processor( text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=snake_case , return_tensors='''pt''' , padding=snake_case ) print('''Shape of pixel values:''' , inputs.pixel_values.shape ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Optional[Any] = model(**snake_case ) # Verify outputs __SCREAMING_SNAKE_CASE : Dict = outputs.logits_per_video __SCREAMING_SNAKE_CASE : Tuple = logits_per_video.softmax(dim=1 ) print('''Probs:''' , snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] ) elif model_name == "xclip-base-patch16": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] ) elif model_name == "xclip-large-patch14": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": __SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": __SCREAMING_SNAKE_CASE : str = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": __SCREAMING_SNAKE_CASE : int = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": 
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] ) else: raise ValueError(F'''Model name {model_name} not supported''' ) assert torch.allclose(snake_case , snake_case , atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case ) if push_to_hub: print('''Pushing model, processor and slow tokenizer files to the hub...''' ) model.push_to_hub(snake_case , organization='''nielsr''' ) processor.push_to_hub(snake_case , organization='''nielsr''' ) slow_tokenizer.push_to_hub(snake_case , organization='''nielsr''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowercase_ = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
74
1
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowercase_ = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp lowercase_ = 5 lowercase_ = 10 @require_sentencepiece @require_tokenizers class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = SpeechaTextTokenizer lowerCAmelCase_ = False lowerCAmelCase_ = True def UpperCAmelCase__ ( self : Any ): """simple docstring""" super().setUp() __SCREAMING_SNAKE_CASE : Union[str, Any] = sp.SentencePieceProcessor() spm_model.Load(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = ['''<s>''', '''<pad>''', '''</s>''', '''<unk>'''] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(_A ) )] __SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(_A , range(len(_A ) ) ) ) __SCREAMING_SNAKE_CASE : List[str] = Path(self.tmpdirname ) save_json(_A , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(_A , save_dir / VOCAB_FILES_NAMES['''spm_file'''] ) __SCREAMING_SNAKE_CASE : Optional[Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = '''<pad>''' __SCREAMING_SNAKE_CASE : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''j''' ) self.assertEqual(len(_A ) , 1001 ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1001 ) def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) __SCREAMING_SNAKE_CASE : Dict = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_A ) , [289, 50, 14, 174, 386] , ) __SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _A , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , ) __SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(_A ) self.assertListEqual(_A , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) __SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(_A ) 
self.assertListEqual( _A , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , ) @slow def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = {'''input_ids''': [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_A , model_name='''facebook/s2t-small-mustc-en-de-st''' , revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''' , ) @require_sentencepiece class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = '''valhalla/s2t_mustc_multilinguial_medium''' lowerCAmelCase_ = '''C\'est trop cool''' lowerCAmelCase_ = '''Esto es genial''' @classmethod def UpperCAmelCase__ ( cls : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def UpperCAmelCase__ ( self : str ): """simple docstring""" self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''] , 4 ) self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''] , 6 ) self.assertEqual(self.tokenizer.lang_code_to_id['''it'''] , 9 ) self.assertEqual(self.tokenizer.lang_code_to_id['''de'''] , 11 ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" self.assertEqual(self.tokenizer.vocab_size , 1_0000 ) def UpperCAmelCase__ ( self : int ): """simple docstring""" self.assertIn(_A , self.tokenizer.all_special_ids ) __SCREAMING_SNAKE_CASE : Any = [ES_CODE, 4, 1601, 47, 7647, 2] __SCREAMING_SNAKE_CASE : int = self.tokenizer.decode(_A , skip_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_A ) self.assertEqual(_A , _A ) self.assertNotIn(self.tokenizer.eos_token , _A ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = '''fr''' __SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0] , _A ) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = '''fr''' self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] ) __SCREAMING_SNAKE_CASE : Optional[int] = '''es''' self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
74
from pathlib import Path import fire def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = Path(snake_case ) __SCREAMING_SNAKE_CASE : Dict = Path(snake_case ) dest_dir.mkdir(exist_ok=snake_case ) for path in src_dir.iterdir(): __SCREAMING_SNAKE_CASE : Union[str, Any] = [x.rstrip() for x in list(path.open().readlines() )][:n] __SCREAMING_SNAKE_CASE : Tuple = dest_dir.joinpath(path.name ) print(snake_case ) dest_path.open('''w''' ).write('''\n'''.join(snake_case ) ) if __name__ == "__main__": fire.Fire(minify)
74
1
def a__ ( snake_case ): """simple docstring""" if n == 1 or not isinstance(snake_case , snake_case ): return 0 elif n == 2: return 1 else: __SCREAMING_SNAKE_CASE : Any = [0, 1] for i in range(2 , n + 1 ): sequence.append(sequence[i - 1] + sequence[i - 2] ) return sequence[n] def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = 0 __SCREAMING_SNAKE_CASE : Optional[int] = 2 while digits < n: index += 1 __SCREAMING_SNAKE_CASE : List[Any] = len(str(fibonacci(snake_case ) ) ) return index def a__ ( snake_case = 1_000 ): """simple docstring""" return fibonacci_digits_index(snake_case ) if __name__ == "__main__": print(solution(int(str(input()).strip())))
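The pair of functions above searches for the index of the first Fibonacci number with a given number of digits (the default of 1,000 digits in the driver). A small standalone sanity check with illustrative names, using the same indexing in which F(12) = 144:

def first_fib_index_with_digits(n_digits: int) -> int:
    # walk the sequence with F(1) = F(2) = 1 until a term reaches n_digits digits
    a, b, index = 1, 1, 2
    while len(str(b)) < n_digits:
        a, b = b, a + b
        index += 1
    return index

assert first_fib_index_with_digits(3) == 12  # F(12) = 144 is the first three-digit term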
74
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]] __SCREAMING_SNAKE_CASE : Tuple = DisjunctiveConstraint(_A ) self.assertTrue(isinstance(dc.token_ids , _A ) ) with self.assertRaises(_A ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(_A ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(_A ): DisjunctiveConstraint(_A ) # fails here def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [[1, 2, 3], [1, 2, 4]] __SCREAMING_SNAKE_CASE : Optional[Any] = DisjunctiveConstraint(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = dc.update(1 ) __SCREAMING_SNAKE_CASE : int = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dc.update(2 ) __SCREAMING_SNAKE_CASE : Optional[Any] = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = dc.update(3 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = stepped is True and completed is True and reset is False self.assertTrue(_A ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy loader so the torch-dependent submodule is only imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowercase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") lowercase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) lowercase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''A folder containing the training data.'''} ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''A folder containing the validation data.'''} ) lowerCAmelCase_ = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) lowerCAmelCase_ = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) lowerCAmelCase_ = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = {} if self.train_dir is not None: __SCREAMING_SNAKE_CASE : Dict = self.train_dir if self.validation_dir is not None: __SCREAMING_SNAKE_CASE : Any = self.validation_dir __SCREAMING_SNAKE_CASE : List[Any] = data_files if data_files else None @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(lowerCAmelCase__ )} , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) lowerCAmelCase_ = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class __UpperCamelCase : """simple docstring""" def __init__( self : Tuple , _A : Optional[int]=192 , _A : List[Any]=32 , _A : Optional[int]=4 , _A : str=0.6 ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = input_size __SCREAMING_SNAKE_CASE : List[str] = mask_patch_size __SCREAMING_SNAKE_CASE : Dict = model_patch_size __SCREAMING_SNAKE_CASE : int = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError('''Input size must be divisible by mask patch size''' ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError('''Mask patch size must be divisible by model patch size''' ) __SCREAMING_SNAKE_CASE : Any = self.input_size // self.mask_patch_size __SCREAMING_SNAKE_CASE : Optional[Any] = self.mask_patch_size // self.model_patch_size __SCREAMING_SNAKE_CASE : int = self.rand_size**2 __SCREAMING_SNAKE_CASE : Optional[int] = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = np.random.permutation(self.token_count )[: self.mask_count] __SCREAMING_SNAKE_CASE : Union[str, Any] = np.zeros(self.token_count , dtype=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = 1 __SCREAMING_SNAKE_CASE : List[str] = mask.reshape((self.rand_size, self.rand_size) ) __SCREAMING_SNAKE_CASE : List[Any] = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack([example['''pixel_values'''] for example in examples] ) __SCREAMING_SNAKE_CASE : Any = torch.stack([example['''mask'''] for example in examples] ) return {"pixel_values": pixel_values, 
"bool_masked_pos": mask} def a__ ( ): """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __SCREAMING_SNAKE_CASE : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_mim''' , snake_case , snake_case ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = training_args.get_process_log_level() logger.setLevel(snake_case ) transformers.utils.logging.set_verbosity(snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. __SCREAMING_SNAKE_CASE : Tuple = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __SCREAMING_SNAKE_CASE : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset. __SCREAMING_SNAKE_CASE : Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. 
__SCREAMING_SNAKE_CASE : Any = None if '''validation''' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , snake_case ) and data_args.train_val_split > 0.0: __SCREAMING_SNAKE_CASE : List[str] = ds['''train'''].train_test_split(data_args.train_val_split ) __SCREAMING_SNAKE_CASE : int = split['''train'''] __SCREAMING_SNAKE_CASE : Dict = split['''test'''] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __SCREAMING_SNAKE_CASE : List[Any] = { '''cache_dir''': model_args.cache_dir, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.config_name_or_path: __SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(model_args.config_name_or_path , **snake_case ) elif model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **snake_case ) else: __SCREAMING_SNAKE_CASE : List[Any] = CONFIG_MAPPING[model_args.model_type]() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(snake_case , '''decoder_type''' ): __SCREAMING_SNAKE_CASE : Any = '''simmim''' # adapt config __SCREAMING_SNAKE_CASE : str = model_args.image_size if model_args.image_size is not None else config.image_size __SCREAMING_SNAKE_CASE : int = model_args.patch_size if model_args.patch_size is not None else config.patch_size __SCREAMING_SNAKE_CASE : str = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { '''image_size''': model_args.image_size, '''patch_size''': model_args.patch_size, '''encoder_stride''': model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: __SCREAMING_SNAKE_CASE : int = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case ) elif model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case ) else: __SCREAMING_SNAKE_CASE : List[Any] = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } __SCREAMING_SNAKE_CASE : str = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : int = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('''Training new model from scratch''' ) __SCREAMING_SNAKE_CASE : List[Any] = AutoModelForMaskedImageModeling.from_config(snake_case ) if training_args.do_train: __SCREAMING_SNAKE_CASE : Any = ds['''train'''].column_names else: __SCREAMING_SNAKE_CASE : int = ds['''validation'''].column_names if data_args.image_column_name is not None: __SCREAMING_SNAKE_CASE : List[Any] = data_args.image_column_name elif "image" in column_names: __SCREAMING_SNAKE_CASE : str = '''image''' elif "img" in column_names: 
__SCREAMING_SNAKE_CASE : List[str] = '''img''' else: __SCREAMING_SNAKE_CASE : Tuple = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py __SCREAMING_SNAKE_CASE : Any = Compose( [ Lambda(lambda snake_case : img.convert('''RGB''' ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator __SCREAMING_SNAKE_CASE : str = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(snake_case ): __SCREAMING_SNAKE_CASE : str = [transforms(snake_case ) for image in examples[image_column_name]] __SCREAMING_SNAKE_CASE : str = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError('''--do_train requires a train dataset''' ) if data_args.max_train_samples is not None: __SCREAMING_SNAKE_CASE : Dict = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError('''--do_eval requires a validation dataset''' ) if data_args.max_eval_samples is not None: __SCREAMING_SNAKE_CASE : Union[str, Any] = ( ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(snake_case ) # Initialize our trainer __SCREAMING_SNAKE_CASE : List[str] = Trainer( model=snake_case , args=snake_case , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=snake_case , data_collator=snake_case , ) # Training if training_args.do_train: __SCREAMING_SNAKE_CASE : Union[str, Any] = None if training_args.resume_from_checkpoint is not None: __SCREAMING_SNAKE_CASE : Tuple = training_args.resume_from_checkpoint elif last_checkpoint is not None: __SCREAMING_SNAKE_CASE : int = last_checkpoint __SCREAMING_SNAKE_CASE : Tuple = trainer.train(resume_from_checkpoint=snake_case ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: __SCREAMING_SNAKE_CASE : Union[str, Any] = trainer.evaluate() trainer.log_metrics('''eval''' , snake_case ) trainer.save_metrics('''eval''' , snake_case ) # Write model card and (optionally) push to hub __SCREAMING_SNAKE_CASE : Optional[Any] = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''masked-image-modeling''', '''dataset''': data_args.dataset_name, '''tags''': ['''masked-image-modeling'''], } if training_args.push_to_hub: trainer.push_to_hub(**snake_case ) else: trainer.create_model_card(**snake_case ) if __name__ == "__main__": main()
import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed lowercase_ = logging.getLogger(__name__) def a__ ( snake_case=2 , snake_case=3 , snake_case=16 , snake_case = 10 , snake_case = 2 ): """simple docstring""" def get_dataset(snake_case ): __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(snake_case , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) __SCREAMING_SNAKE_CASE : str = get_dataset(snake_case ) __SCREAMING_SNAKE_CASE : Dict = get_dataset(snake_case ) __SCREAMING_SNAKE_CASE : Optional[int] = DataLoader(snake_case , shuffle=snake_case , batch_size=snake_case , num_workers=4 ) __SCREAMING_SNAKE_CASE : Optional[Any] = DataLoader(snake_case , shuffle=snake_case , batch_size=snake_case , num_workers=4 ) return (train_dataloader, valid_dataloader) def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = [] for epoch in range(snake_case ): # Train quickly model.train() for batch in dataloader: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = batch __SCREAMING_SNAKE_CASE : List[Any] = model(snake_case ) __SCREAMING_SNAKE_CASE : str = torch.nn.functional.mse_loss(snake_case , snake_case ) accelerator.backward(snake_case ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class __UpperCamelCase ( nn.Module ): """simple docstring""" def __init__( self : Union[str, Any] ): """simple docstring""" super().__init__() __SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.randn(1 ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.randn(1 ) ) def UpperCAmelCase__ ( self : Union[str, Any] , _A : List[str] ): """simple docstring""" return x * self.a + self.b class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : Any ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __SCREAMING_SNAKE_CASE : Optional[Any] = DummyModel() __SCREAMING_SNAKE_CASE : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = dummy_dataloaders() __SCREAMING_SNAKE_CASE : Optional[Any] = ProjectConfiguration(total_limit=1 , project_dir=_A , automatic_checkpoint_naming=_A ) # Train baseline __SCREAMING_SNAKE_CASE : Optional[Any] = Accelerator(project_config=_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = accelerator.prepare( _A , _A , _A , _A ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = DummyModel() __SCREAMING_SNAKE_CASE : int = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = dummy_dataloaders() # Train baseline __SCREAMING_SNAKE_CASE : Optional[int] = Accelerator() 
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.prepare( _A , _A , _A , _A ) # Save initial __SCREAMING_SNAKE_CASE : List[Any] = os.path.join(_A , '''initial''' ) accelerator.save_state(_A ) ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Any = model.a.item(), model.b.item() __SCREAMING_SNAKE_CASE : Dict = optimizer.state_dict() __SCREAMING_SNAKE_CASE : Tuple = train(3 , _A , _A , _A , _A ) ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Union[str, Any] = model.a.item(), model.b.item() __SCREAMING_SNAKE_CASE : List[Any] = optimizer.state_dict() # Train partially set_seed(42 ) __SCREAMING_SNAKE_CASE : Optional[int] = DummyModel() __SCREAMING_SNAKE_CASE : Tuple = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dummy_dataloaders() __SCREAMING_SNAKE_CASE : List[str] = Accelerator() __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare( _A , _A , _A , _A ) accelerator.load_state(_A ) ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Any = model.a.item(), model.b.item() __SCREAMING_SNAKE_CASE : Dict = optimizer.state_dict() self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Tuple = train(2 , _A , _A , _A , _A ) # Save everything __SCREAMING_SNAKE_CASE : Dict = os.path.join(_A , '''checkpoint''' ) accelerator.save_state(_A ) # Load everything back in and make sure all states work accelerator.load_state(_A ) test_rands += train(1 , _A , _A , _A , _A ) ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Tuple = model.a.item(), model.b.item() __SCREAMING_SNAKE_CASE : str = optimizer.state_dict() self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) def UpperCAmelCase__ ( self : int ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __SCREAMING_SNAKE_CASE : List[str] = DummyModel() __SCREAMING_SNAKE_CASE : List[str] = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = dummy_dataloaders() __SCREAMING_SNAKE_CASE : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_A ) # Train baseline __SCREAMING_SNAKE_CASE : Any = Accelerator(project_dir=_A , project_config=_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = accelerator.prepare( _A , _A , _A , _A ) # Save initial accelerator.save_state() ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Optional[int] = model.a.item(), model.b.item() __SCREAMING_SNAKE_CASE : Tuple = optimizer.state_dict() __SCREAMING_SNAKE_CASE : Any = train(3 , _A , _A , _A , _A ) ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Union[str, Any] = model.a.item(), model.b.item() __SCREAMING_SNAKE_CASE : Tuple = optimizer.state_dict() # Train partially set_seed(42 ) __SCREAMING_SNAKE_CASE : Optional[int] = DummyModel() __SCREAMING_SNAKE_CASE : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = dummy_dataloaders() __SCREAMING_SNAKE_CASE : Optional[int] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_A ) __SCREAMING_SNAKE_CASE : List[str] = Accelerator(project_dir=_A , project_config=_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, 
__SCREAMING_SNAKE_CASE : Tuple = accelerator.prepare( _A , _A , _A , _A ) accelerator.load_state(os.path.join(_A , '''checkpoints''' , '''checkpoint_0''' ) ) ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Optional[Any] = model.a.item(), model.b.item() __SCREAMING_SNAKE_CASE : Optional[int] = optimizer.state_dict() self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Optional[int] = train(2 , _A , _A , _A , _A ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(_A , '''checkpoints''' , '''checkpoint_1''' ) ) test_rands += train(1 , _A , _A , _A , _A ) ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Optional[int] = model.a.item(), model.b.item() __SCREAMING_SNAKE_CASE : int = optimizer.state_dict() self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([1, 2, 3] ) __SCREAMING_SNAKE_CASE : str = torch.tensor([2, 3, 4] ) __SCREAMING_SNAKE_CASE : List[str] = DummyModel() __SCREAMING_SNAKE_CASE : int = torch.optim.Adam(net.parameters() ) __SCREAMING_SNAKE_CASE : int = Accelerator() with self.assertRaises(_A ) as ve: accelerator.register_for_checkpointing(_A , _A , _A , _A ) __SCREAMING_SNAKE_CASE : List[str] = str(ve.exception ) self.assertTrue('''Item at index 0''' in message ) self.assertTrue('''Item at index 1''' in message ) self.assertFalse('''Item at index 2''' in message ) self.assertFalse('''Item at index 3''' in message ) def UpperCAmelCase__ ( self : str ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __SCREAMING_SNAKE_CASE : List[str] = DummyModel() __SCREAMING_SNAKE_CASE : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) __SCREAMING_SNAKE_CASE : Tuple = torch.optim.lr_scheduler.StepLR(_A , step_size=1 , gamma=0.99 ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = dummy_dataloaders() __SCREAMING_SNAKE_CASE : int = ProjectConfiguration(automatic_checkpoint_naming=_A ) # Train baseline __SCREAMING_SNAKE_CASE : List[Any] = Accelerator(project_dir=_A , project_config=_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare( _A , _A , _A , _A , _A ) # Save initial accelerator.save_state() __SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.state_dict() train(3 , _A , _A , _A , _A , _A ) self.assertNotEqual(_A , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(_A , '''checkpoints''' , '''checkpoint_0''' ) ) self.assertEqual(_A , scheduler.state_dict() ) def UpperCAmelCase__ ( self : Dict ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __SCREAMING_SNAKE_CASE : Optional[int] = DummyModel() __SCREAMING_SNAKE_CASE : List[str] = ProjectConfiguration(automatic_checkpoint_naming=_A , total_limit=2 ) # Train baseline __SCREAMING_SNAKE_CASE : Optional[Any] = Accelerator(project_dir=_A , project_config=_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare(_A ) # Save 3 states: for _ in range(11 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(_A , '''checkpoints''' , '''checkpoint_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(_A , '''checkpoints''' , 
'''checkpoint_9''' ) ) ) self.assertTrue(os.path.exists(os.path.join(_A , '''checkpoints''' , '''checkpoint_10''' ) ) ) @require_cuda def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] execute_subprocess_async(_A , env=os.environ.copy() ) if __name__ == "__main__": lowercase_ = """/tmp/accelerate/state_checkpointing""" lowercase_ = DummyModel() lowercase_ = torch.optim.Adam(params=model.parameters(), lr=1e-3) lowercase_ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) lowercase_ , lowercase_ = dummy_dataloaders() lowercase_ = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline lowercase_ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""") if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) lowercase_ , lowercase_ = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: lowercase_ = group["""params"""][0].device break assert param_device.type == accelerator.device.type lowercase_ = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""") for group in optimizer.param_groups: lowercase_ = group["""params"""][0].device break assert ( param_device.type == torch.device("""cpu""").type ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""") for group in optimizer.param_groups: lowercase_ = group["""params"""][0].device break assert ( param_device.type == accelerator.device.type ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""): accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""") accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """facebook/data2vec-vision-base-ft""": ( """https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json""" ), } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''data2vec-vision''' def __init__( self : Optional[int] , _A : List[Any]=768 , _A : Any=12 , _A : str=12 , _A : Union[str, Any]=3072 , _A : Union[str, Any]="gelu" , _A : List[Any]=0.0 , _A : Dict=0.0 , _A : Dict=0.02 , _A : Any=1e-12 , _A : Optional[Any]=224 , _A : Union[str, Any]=16 , _A : Tuple=3 , _A : List[Any]=False , _A : List[str]=False , _A : Dict=False , _A : Dict=False , _A : Any=0.1 , _A : List[str]=0.1 , _A : Dict=True , _A : Dict=[3, 5, 7, 11] , _A : Union[str, Any]=[1, 2, 3, 6] , _A : Optional[Any]=True , _A : Any=0.4 , _A : List[str]=256 , _A : Any=1 , _A : Any=False , _A : Union[str, Any]=255 , **_A : Tuple , ): """simple docstring""" super().__init__(**_A ) __SCREAMING_SNAKE_CASE : Any = hidden_size __SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers __SCREAMING_SNAKE_CASE : Tuple = num_attention_heads __SCREAMING_SNAKE_CASE : List[Any] = intermediate_size __SCREAMING_SNAKE_CASE : Tuple = hidden_act __SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob __SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : List[Any] = initializer_range __SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps __SCREAMING_SNAKE_CASE : Any = image_size __SCREAMING_SNAKE_CASE : Optional[int] = patch_size __SCREAMING_SNAKE_CASE : Any = num_channels __SCREAMING_SNAKE_CASE : List[str] = use_mask_token __SCREAMING_SNAKE_CASE : List[Any] = use_absolute_position_embeddings __SCREAMING_SNAKE_CASE : Dict = use_relative_position_bias __SCREAMING_SNAKE_CASE : str = use_shared_relative_position_bias __SCREAMING_SNAKE_CASE : Union[str, Any] = layer_scale_init_value __SCREAMING_SNAKE_CASE : str = drop_path_rate __SCREAMING_SNAKE_CASE : Tuple = use_mean_pooling # decode head attributes (semantic segmentation) __SCREAMING_SNAKE_CASE : str = out_indices __SCREAMING_SNAKE_CASE : List[str] = pool_scales # auxiliary head attributes (semantic segmentation) __SCREAMING_SNAKE_CASE : Tuple = use_auxiliary_head __SCREAMING_SNAKE_CASE : Optional[Any] = auxiliary_loss_weight __SCREAMING_SNAKE_CASE : Union[str, Any] = auxiliary_channels __SCREAMING_SNAKE_CASE : List[Any] = auxiliary_num_convs __SCREAMING_SNAKE_CASE : Optional[Any] = auxiliary_concat_input __SCREAMING_SNAKE_CASE : Any = semantic_loss_ignore_index class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = version.parse('''1.11''' ) @property def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return 1e-4
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging lowercase_ = logging.get_logger(__name__) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = set() __SCREAMING_SNAKE_CASE : str = [] def parse_line(snake_case ): for line in fp: if isinstance(snake_case , snake_case ): __SCREAMING_SNAKE_CASE : List[Any] = line.decode('''UTF-8''' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(''' ''' ): # process a single warning and move it to `selected_warnings`. if len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : List[Any] = '''\n'''.join(snake_case ) # Only keep the warnings specified in `targets` if any(F''': {x}: ''' in warning for x in targets ): selected_warnings.add(snake_case ) buffer.clear() continue else: __SCREAMING_SNAKE_CASE : int = line.strip() buffer.append(snake_case ) if from_gh: for filename in os.listdir(snake_case ): __SCREAMING_SNAKE_CASE : Any = os.path.join(snake_case , snake_case ) if not os.path.isdir(snake_case ): # read the file if filename != "warnings.txt": continue with open(snake_case ) as fp: parse_line(snake_case ) else: try: with zipfile.ZipFile(snake_case ) as z: for filename in z.namelist(): if not os.path.isdir(snake_case ): # read the file if filename != "warnings.txt": continue with z.open(snake_case ) as fp: parse_line(snake_case ) except Exception: logger.warning( F'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' ) return selected_warnings def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = set() __SCREAMING_SNAKE_CASE : List[Any] = [os.path.join(snake_case , snake_case ) for p in os.listdir(snake_case ) if (p.endswith('''.zip''' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(snake_case , snake_case ) ) return selected_warnings if __name__ == "__main__": def a__ ( snake_case ): """simple docstring""" return values.split(''',''' ) lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") # optional parameters parser.add_argument( """--targets""", default="""DeprecationWarning,UserWarning,FutureWarning""", type=list_str, help="""Comma-separated list of target warning(s) which we want to extract.""", ) parser.add_argument( """--from_gh""", action="""store_true""", help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""", ) lowercase_ = parser.parse_args() lowercase_ = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links lowercase_ = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("""=""" * 
80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts lowercase_ = extract_warnings(args.output_dir, args.targets) lowercase_ = sorted(selected_warnings) with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[str] , _A : Optional[int] , _A : Optional[Any]=13 , _A : List[Any]=7 , _A : List[str]=True , _A : Dict=True , _A : Tuple=False , _A : Union[str, Any]=True , _A : List[str]=99 , _A : Union[str, Any]=32 , _A : str=5 , _A : Union[str, Any]=4 , _A : int=37 , _A : int="gelu" , _A : Tuple=0.1 , _A : Dict=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : List[Any]=2 , _A : List[Any]=0.02 , _A : Any=3 , _A : Optional[int]=4 , _A : Optional[int]=None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = parent __SCREAMING_SNAKE_CASE : Optional[int] = batch_size __SCREAMING_SNAKE_CASE : str = seq_length __SCREAMING_SNAKE_CASE : int = is_training __SCREAMING_SNAKE_CASE : Union[str, Any] = use_input_mask __SCREAMING_SNAKE_CASE : str = use_token_type_ids __SCREAMING_SNAKE_CASE : Any = use_labels __SCREAMING_SNAKE_CASE : Any = vocab_size __SCREAMING_SNAKE_CASE : Optional[int] = hidden_size __SCREAMING_SNAKE_CASE : Any = num_hidden_layers __SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads __SCREAMING_SNAKE_CASE : List[str] = intermediate_size __SCREAMING_SNAKE_CASE : List[str] = hidden_act __SCREAMING_SNAKE_CASE : int = hidden_dropout_prob __SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings __SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size __SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size __SCREAMING_SNAKE_CASE : int = initializer_range __SCREAMING_SNAKE_CASE : List[Any] = num_labels __SCREAMING_SNAKE_CASE : List[Any] = num_choices __SCREAMING_SNAKE_CASE : Union[str, Any] = scope def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_input_mask: __SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE : Any = None __SCREAMING_SNAKE_CASE : Union[str, Any] = None __SCREAMING_SNAKE_CASE : int = None if self.use_labels: __SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE : Dict = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , 
hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : Optional[int] , _A : int , _A : Union[str, Any] , _A : List[str] , _A : Dict , _A : Dict , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , _A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Tuple , _A : Dict , _A : Tuple , _A : str , _A : Optional[int] , _A : List[str] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForMaskedLM(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Tuple = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Dict , _A : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any] , _A : Optional[Any] , _A : str , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForQuestionAnswering(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : int = model( _A , attention_mask=_A , start_positions=_A , end_positions=_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : Dict , _A : List[str] , _A : Tuple , _A : str , _A : Tuple , _A : Optional[int] , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels __SCREAMING_SNAKE_CASE : List[Any] = DistilBertForSequenceClassification(_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : List[str] , _A : int , _A : List[Any] , _A : Any , _A : Any , _A : str , _A : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForTokenClassification(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Dict , _A : Optional[int] , _A : int , _A : Optional[int] , _A : List[Any] , _A : int , _A : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.num_choices __SCREAMING_SNAKE_CASE : int = DistilBertForMultipleChoice(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE : Optional[Any] = model( _A , attention_mask=_A , labels=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs() ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), 
(__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : List[Any] = config_and_inputs __SCREAMING_SNAKE_CASE : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) lowerCAmelCase_ = ( { '''feature-extraction''': DistilBertModel, '''fill-mask''': DistilBertForMaskedLM, '''question-answering''': DistilBertForQuestionAnswering, '''text-classification''': DistilBertForSequenceClassification, '''token-classification''': DistilBertForTokenClassification, '''zero-shot''': DistilBertForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = True def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = DistilBertModelTester(self ) __SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=_A , dim=37 ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*_A ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*_A ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*_A ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*_A ) @slow def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @slow @require_torch_gpu def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == DistilBertForMultipleChoice: return __SCREAMING_SNAKE_CASE : Dict = True __SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(config=_A ) __SCREAMING_SNAKE_CASE : int = self._prepare_for_class(_A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = torch.jit.trace( _A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(_A , os.path.join(_A , '''traced_model.pt''' ) ) __SCREAMING_SNAKE_CASE : Optional[int] = torch.jit.load(os.path.join(_A , '''traced_model.pt''' ) , map_location=_A ) loaded(inputs_dict['''input_ids'''].to(_A ) , inputs_dict['''attention_mask'''].to(_A ) ) @require_torch class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A , attention_mask=_A )[0] __SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , _A ) __SCREAMING_SNAKE_CASE : Any = torch.tensor( [[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1e-4 ) )
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100_000,
        hidden_size=4096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # Pop the (misspelled) legacy kwarg if it was passed; otherwise fall back to the parameter value.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
import logging import os import threading import time try: import warnings except ImportError: lowercase_ = None try: import msvcrt except ImportError: lowercase_ = None try: import fcntl except ImportError: lowercase_ = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: lowercase_ = OSError # Data # ------------------------------------------------ lowercase_ = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] lowercase_ = """3.0.12""" lowercase_ = None def a__ ( ): """simple docstring""" global _logger __SCREAMING_SNAKE_CASE : Optional[Any] = _logger or logging.getLogger(__name__ ) return _logger class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[Any] , _A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = lock_file return None def __str__( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = F'''The file lock \'{self.lock_file}\' could not be acquired.''' return temp class __UpperCamelCase : """simple docstring""" def __init__( self : Optional[Any] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = lock return None def __enter__( self : Any ): """simple docstring""" return self.lock def __exit__( self : str , _A : Any , _A : int , _A : Any ): """simple docstring""" self.lock.release() return None class __UpperCamelCase : """simple docstring""" def __init__( self : Any , _A : int , _A : Optional[int]=-1 , _A : List[Any]=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long __SCREAMING_SNAKE_CASE : Optional[Any] = self.hash_filename_if_too_long(_A , _A ) # The path to the lock file. __SCREAMING_SNAKE_CASE : Tuple = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __SCREAMING_SNAKE_CASE : str = None # The default timeout value. __SCREAMING_SNAKE_CASE : Any = timeout # We use this lock primarily for the lock counter. __SCREAMING_SNAKE_CASE : int = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __SCREAMING_SNAKE_CASE : int = 0 return None @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._lock_file @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._timeout @timeout.setter def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = float(_A ) return None def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" raise NotImplementedError() def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" raise NotImplementedError() @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._lock_file_fd is not None def UpperCAmelCase__ ( self : Tuple , _A : List[Any]=None , _A : Optional[Any]=0.05 ): """simple docstring""" if timeout is None: __SCREAMING_SNAKE_CASE : Optional[int] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __SCREAMING_SNAKE_CASE : Tuple = id(self ) __SCREAMING_SNAKE_CASE : Any = self._lock_file __SCREAMING_SNAKE_CASE : Union[str, Any] = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F'''Attempting to acquire lock {lock_id} on {lock_filename}''' ) self._acquire() if self.is_locked: logger().debug(F'''Lock {lock_id} acquired on {lock_filename}''' ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F'''Timeout on acquiring lock {lock_id} on {lock_filename}''' ) raise Timeout(self._lock_file ) else: logger().debug( F'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' ) time.sleep(_A ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __SCREAMING_SNAKE_CASE : Optional[Any] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def UpperCAmelCase__ ( self : int , _A : List[str]=False ): """simple docstring""" with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __SCREAMING_SNAKE_CASE : Optional[int] = id(self ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self._lock_file logger().debug(F'''Attempting to release lock {lock_id} on {lock_filename}''' ) self._release() __SCREAMING_SNAKE_CASE : int = 0 logger().debug(F'''Lock {lock_id} released on {lock_filename}''' ) return None def __enter__( self : int ): """simple docstring""" self.acquire() return self def __exit__( self : Optional[int] , _A : List[str] , _A : List[Any] , _A : int ): """simple docstring""" self.release() return None def __del__( self : int ): """simple docstring""" self.release(force=_A ) return None def UpperCAmelCase__ ( self : Optional[int] , _A : str , _A : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = os.path.basename(_A ) if len(_A ) > max_length and max_length > 0: __SCREAMING_SNAKE_CASE : Tuple = os.path.dirname(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = str(hash(_A ) ) __SCREAMING_SNAKE_CASE : Optional[int] = filename[: max_length - len(_A ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(_A , _A ) else: return path class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[Any] , _A : Optional[Any] , _A : List[Any]=-1 , _A : Dict=None ): """simple docstring""" from .file_utils import relative_to_absolute_path super().__init__(_A , timeout=_A , max_filename_length=_A ) __SCREAMING_SNAKE_CASE : str = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __SCREAMING_SNAKE_CASE : List[str] = os.open(self._lock_file , _A ) except OSError: pass else: try: msvcrt.locking(_A , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(_A ) else: __SCREAMING_SNAKE_CASE : str = fd return None def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self._lock_file_fd __SCREAMING_SNAKE_CASE : int = None msvcrt.locking(_A , msvcrt.LK_UNLCK , 1 ) os.close(_A ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Tuple , _A : Optional[int] , _A : Dict=-1 , _A : str=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = os.statvfs(os.path.dirname(_A ) ).f_namemax super().__init__(_A , timeout=_A , max_filename_length=_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = os.O_RDWR | os.O_CREAT | os.O_TRUNC __SCREAMING_SNAKE_CASE : int = os.open(self._lock_file , _A ) try: fcntl.flock(_A , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(_A ) else: __SCREAMING_SNAKE_CASE : int = fd return None def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self._lock_file_fd __SCREAMING_SNAKE_CASE : Any = None fcntl.flock(_A , fcntl.LOCK_UN ) os.close(_A ) return None class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __SCREAMING_SNAKE_CASE : Optional[Any] = os.open(self._lock_file , _A ) except OSError: pass else: __SCREAMING_SNAKE_CASE : List[str] = fd return None def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" os.close(self._lock_file_fd ) __SCREAMING_SNAKE_CASE : Optional[Any] = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None lowercase_ = None if msvcrt: lowercase_ = WindowsFileLock elif fcntl: lowercase_ = UnixFileLock else: lowercase_ = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
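A minimal sketch of how this style of lock is typically used, written against the upstream filelock package's public API (which this vendored module mirrors); the lock path and timeout below are illustrative, not taken from this module.

# Usage sketch for the file-lock classes defined above (upstream `filelock` API).
from filelock import FileLock, Timeout

lock = FileLock("example.txt.lock", timeout=5)  # resolves to the Windows/Unix/soft variant per platform

try:
    with lock:  # acquire() on enter, release() on exit; nesting is handled by the internal counter
        with open("example.txt", "a") as f:
            f.write("guarded write\n")
except Timeout:
    # Raised when another process keeps holding the lock past the 5 s timeout
    print("could not acquire the lock")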
74
1
import pickle import numpy as np from matplotlib import pyplot as plt class __UpperCamelCase : """simple docstring""" def __init__( self : Optional[int] , _A : Dict , _A : List[Any] , _A : Optional[Any] , _A : Optional[int] , _A : Union[str, Any] , _A : Optional[int]=0.2 , _A : Tuple=0.2 ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = bp_numa __SCREAMING_SNAKE_CASE : Tuple = bp_numa __SCREAMING_SNAKE_CASE : int = bp_numa __SCREAMING_SNAKE_CASE : Tuple = conva_get[:2] __SCREAMING_SNAKE_CASE : Optional[int] = conva_get[2] __SCREAMING_SNAKE_CASE : List[Any] = size_pa __SCREAMING_SNAKE_CASE : Any = rate_w __SCREAMING_SNAKE_CASE : Union[str, Any] = rate_t __SCREAMING_SNAKE_CASE : Dict = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] __SCREAMING_SNAKE_CASE : Optional[int] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) __SCREAMING_SNAKE_CASE : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) __SCREAMING_SNAKE_CASE : Dict = -2 * np.random.rand(self.conva[1] ) + 1 __SCREAMING_SNAKE_CASE : int = -2 * np.random.rand(self.num_bpa ) + 1 __SCREAMING_SNAKE_CASE : Optional[int] = -2 * np.random.rand(self.num_bpa ) + 1 def UpperCAmelCase__ ( self : Optional[int] , _A : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = { '''num_bp1''': self.num_bpa, '''num_bp2''': self.num_bpa, '''num_bp3''': self.num_bpa, '''conv1''': self.conva, '''step_conv1''': self.step_conva, '''size_pooling1''': self.size_poolinga, '''rate_weight''': self.rate_weight, '''rate_thre''': self.rate_thre, '''w_conv1''': self.w_conva, '''wkj''': self.wkj, '''vji''': self.vji, '''thre_conv1''': self.thre_conva, '''thre_bp2''': self.thre_bpa, '''thre_bp3''': self.thre_bpa, } with open(_A , '''wb''' ) as f: pickle.dump(_A , _A ) print(F'''Model saved: {save_path}''' ) @classmethod def UpperCAmelCase__ ( cls : List[str] , _A : int ): """simple docstring""" with open(_A , '''rb''' ) as f: __SCREAMING_SNAKE_CASE : Tuple = pickle.load(_A ) # noqa: S301 __SCREAMING_SNAKE_CASE : str = model_dic.get('''conv1''' ) conv_get.append(model_dic.get('''step_conv1''' ) ) __SCREAMING_SNAKE_CASE : Dict = model_dic.get('''size_pooling1''' ) __SCREAMING_SNAKE_CASE : List[Any] = model_dic.get('''num_bp1''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = model_dic.get('''num_bp2''' ) __SCREAMING_SNAKE_CASE : Any = model_dic.get('''num_bp3''' ) __SCREAMING_SNAKE_CASE : Any = model_dic.get('''rate_weight''' ) __SCREAMING_SNAKE_CASE : int = model_dic.get('''rate_thre''' ) # create model instance __SCREAMING_SNAKE_CASE : Tuple = CNN(_A , _A , _A , _A , _A , _A , _A ) # modify model parameter __SCREAMING_SNAKE_CASE : Optional[int] = model_dic.get('''w_conv1''' ) __SCREAMING_SNAKE_CASE : Dict = model_dic.get('''wkj''' ) __SCREAMING_SNAKE_CASE : Any = model_dic.get('''vji''' ) __SCREAMING_SNAKE_CASE : str = model_dic.get('''thre_conv1''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = model_dic.get('''thre_bp2''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = model_dic.get('''thre_bp3''' ) return conv_ins def UpperCAmelCase__ ( self : List[Any] , _A : List[Any] ): """simple docstring""" return 1 / (1 + np.exp(-1 * x )) def UpperCAmelCase__ ( self : str , _A : List[str] ): """simple docstring""" return round(_A , 3 ) def UpperCAmelCase__ ( self : int , _A : int , _A : str , _A : List[Any] , _A : Tuple , _A : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = convs[0] __SCREAMING_SNAKE_CASE : Any = convs[1] 
__SCREAMING_SNAKE_CASE : int = np.shape(_A )[0] # get the data slice of original image data, data_focus __SCREAMING_SNAKE_CASE : Union[str, Any] = [] for i_focus in range(0 , size_data - size_conv + 1 , _A ): for j_focus in range(0 , size_data - size_conv + 1 , _A ): __SCREAMING_SNAKE_CASE : Optional[int] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(_A ) # calculate the feature map of every single kernel, and saved as list of matrix __SCREAMING_SNAKE_CASE : Dict = [] __SCREAMING_SNAKE_CASE : List[str] = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(_A ): __SCREAMING_SNAKE_CASE : List[Any] = [] for i_focus in range(len(_A ) ): __SCREAMING_SNAKE_CASE : int = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(_A ) ) __SCREAMING_SNAKE_CASE : str = np.asmatrix(_A ).reshape( _A , _A ) data_featuremap.append(_A ) # expanding the data slice to One dimenssion __SCREAMING_SNAKE_CASE : int = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(_A ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = np.asarray(_A ) return focus_list, data_featuremap def UpperCAmelCase__ ( self : List[str] , _A : str , _A : Union[str, Any] , _A : Optional[int]="average_pool" ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = len(featuremaps[0] ) __SCREAMING_SNAKE_CASE : List[Any] = int(size_map / size_pooling ) __SCREAMING_SNAKE_CASE : Optional[Any] = [] for i_map in range(len(_A ) ): __SCREAMING_SNAKE_CASE : Tuple = featuremaps[i_map] __SCREAMING_SNAKE_CASE : Dict = [] for i_focus in range(0 , _A , _A ): for j_focus in range(0 , _A , _A ): __SCREAMING_SNAKE_CASE : Tuple = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(_A ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(_A ) ) __SCREAMING_SNAKE_CASE : Optional[Any] = np.asmatrix(_A ).reshape(_A , _A ) featuremap_pooled.append(_A ) return featuremap_pooled def UpperCAmelCase__ ( self : Dict , _A : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = [] for i in range(len(_A ) ): __SCREAMING_SNAKE_CASE : Optional[int] = np.shape(data[i] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = data[i].reshape(1 , shapes[0] * shapes[1] ) __SCREAMING_SNAKE_CASE : List[Any] = data_listed.getA().tolist()[0] data_expanded.extend(_A ) __SCREAMING_SNAKE_CASE : Tuple = np.asarray(_A ) return data_expanded def UpperCAmelCase__ ( self : List[Any] , _A : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = np.asarray(_A ) __SCREAMING_SNAKE_CASE : List[str] = np.shape(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def UpperCAmelCase__ ( self : Dict , _A : Any , _A : Optional[Any] , _A : List[Any] , _A : Tuple , _A : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = [] __SCREAMING_SNAKE_CASE : List[str] = 0 for i_map in range(_A ): __SCREAMING_SNAKE_CASE : Dict = np.ones((size_map, size_map) ) for i in range(0 , _A , _A ): for j in range(0 , _A , _A ): __SCREAMING_SNAKE_CASE : Union[str, Any] = pd_pool[ i_pool ] __SCREAMING_SNAKE_CASE : List[str] = i_pool + 1 __SCREAMING_SNAKE_CASE : Optional[Any] = np.multiply( _A , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(_A ) return pd_all def UpperCAmelCase__ ( self : List[str] , _A : List[Any] , _A : str , _A : Any , _A : Union[str, Any] , _A : 
List[Any] , _A : Any=bool ): """simple docstring""" print('''----------------------Start Training-------------------------''' ) print((''' - - Shape: Train_Data ''', np.shape(_A )) ) print((''' - - Shape: Teach_Data ''', np.shape(_A )) ) __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : int = [] __SCREAMING_SNAKE_CASE : str = 1_0000 while rp < n_repeat and mse >= error_accuracy: __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 print(F'''-------------Learning Time {rp}--------------''' ) for p in range(len(_A ) ): # print('------------Learning Image: %d--------------'%p) __SCREAMING_SNAKE_CASE : Tuple = np.asmatrix(datas_train[p] ) __SCREAMING_SNAKE_CASE : List[str] = np.asarray(datas_teach[p] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = self.convolute( _A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) __SCREAMING_SNAKE_CASE : Optional[Any] = self.pooling(_A , self.size_poolinga ) __SCREAMING_SNAKE_CASE : List[str] = np.shape(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = self._expand(_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = data_bp_input __SCREAMING_SNAKE_CASE : Tuple = np.dot(_A , self.vji.T ) - self.thre_bpa __SCREAMING_SNAKE_CASE : Optional[Any] = self.sig(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = np.dot(_A , self.wkj.T ) - self.thre_bpa __SCREAMING_SNAKE_CASE : Dict = self.sig(_A ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- __SCREAMING_SNAKE_CASE : Union[str, Any] = np.multiply( (data_teach - bp_outa) , np.multiply(_A , (1 - bp_outa) ) ) __SCREAMING_SNAKE_CASE : str = np.multiply( np.dot(_A , self.wkj ) , np.multiply(_A , (1 - bp_outa) ) ) __SCREAMING_SNAKE_CASE : Optional[Any] = np.dot(_A , self.vji ) __SCREAMING_SNAKE_CASE : List[Any] = pd_i_all / (self.size_poolinga * self.size_poolinga) __SCREAMING_SNAKE_CASE : Dict = pd_conva_pooled.T.getA().tolist() __SCREAMING_SNAKE_CASE : Optional[Any] = self._calculate_gradient_from_pool( _A , _A , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): __SCREAMING_SNAKE_CASE : str = self._expand_mat(pd_conva_all[k_conv] ) __SCREAMING_SNAKE_CASE : Any = self.rate_weight * np.dot(_A , _A ) __SCREAMING_SNAKE_CASE : Any = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) __SCREAMING_SNAKE_CASE : str = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer __SCREAMING_SNAKE_CASE : List[Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight __SCREAMING_SNAKE_CASE : Union[str, Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight __SCREAMING_SNAKE_CASE : Optional[Any] = self.thre_bpa - pd_k_all * self.rate_thre __SCREAMING_SNAKE_CASE : int = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image __SCREAMING_SNAKE_CASE : Dict = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) __SCREAMING_SNAKE_CASE : List[str] = rp + 1 __SCREAMING_SNAKE_CASE : List[Any] = error_count / patterns all_mse.append(_A ) def draw_error(): __SCREAMING_SNAKE_CASE : Optional[Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(_A , '''+-''' ) plt.plot(_A , '''r--''' ) plt.xlabel('''Learning Times''' ) plt.ylabel('''All_mse''' ) plt.grid(_A , alpha=0.5 ) plt.show() print('''------------------Training Complished---------------------''' ) print((''' - 
- Training epoch: ''', rp, F''' - - Mse: {mse:.6f}''') ) if draw_e: draw_error() return mse def UpperCAmelCase__ ( self : Tuple , _A : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = [] print('''-------------------Start Testing-------------------------''' ) print((''' - - Shape: Test_Data ''', np.shape(_A )) ) for p in range(len(_A ) ): __SCREAMING_SNAKE_CASE : int = np.asmatrix(datas_test[p] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = self.convolute( _A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) __SCREAMING_SNAKE_CASE : Optional[Any] = self.pooling(_A , self.size_poolinga ) __SCREAMING_SNAKE_CASE : Dict = self._expand(_A ) __SCREAMING_SNAKE_CASE : str = data_bp_input __SCREAMING_SNAKE_CASE : Any = bp_outa * self.vji.T - self.thre_bpa __SCREAMING_SNAKE_CASE : Optional[int] = self.sig(_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = bp_outa * self.wkj.T - self.thre_bpa __SCREAMING_SNAKE_CASE : Optional[Any] = self.sig(_A ) produce_out.extend(bp_outa.getA().tolist() ) __SCREAMING_SNAKE_CASE : int = [list(map(self.do_round , _A ) ) for each in produce_out] return np.asarray(_A ) def UpperCAmelCase__ ( self : Union[str, Any] , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = np.asmatrix(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = self.convolute( _A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) __SCREAMING_SNAKE_CASE : Optional[Any] = self.pooling(_A , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
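The pooling routine above reduces each feature map over non-overlapping windows, keeping either the window average or the window maximum. A standalone numpy sketch of that reduction, with a made-up 4x4 input and pool size 2:

import numpy as np

# Standalone sketch of the average/max pooling loop used above (illustrative input, pool size 2).
feature_map = np.arange(16, dtype=float).reshape(4, 4)
size_pooling = 2
pooled = []
for i in range(0, feature_map.shape[0], size_pooling):
    row = []
    for j in range(0, feature_map.shape[1], size_pooling):
        window = feature_map[i : i + size_pooling, j : j + size_pooling]
        row.append(window.mean())  # swap in window.max() for max pooling
    pooled.append(row)
print(np.asarray(pooled))  # 2x2 map of window averages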
74
import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup lowercase_ = logging.get_logger(__name__) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Optional[Any] , **_A : Dict ): """simple docstring""" requires_backends(self , ['''bs4'''] ) super().__init__(**_A ) def UpperCAmelCase__ ( self : Optional[int] , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = [] __SCREAMING_SNAKE_CASE : Any = [] __SCREAMING_SNAKE_CASE : Union[str, Any] = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag __SCREAMING_SNAKE_CASE : Optional[int] = parent.find_all(child.name , recursive=_A ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(_A ) else next(i for i, s in enumerate(_A , 1 ) if s is child ) ) __SCREAMING_SNAKE_CASE : Any = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def UpperCAmelCase__ ( self : Dict , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BeautifulSoup(_A , '''html.parser''' ) __SCREAMING_SNAKE_CASE : str = [] __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : int = [] for element in html_code.descendants: if type(_A ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue __SCREAMING_SNAKE_CASE : List[Any] = html.unescape(_A ).strip() if not text_in_this_tag: continue all_doc_strings.append(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = self.xpath_soup(_A ) stringaxtag_seq.append(_A ) stringaxsubs_seq.append(_A ) if len(_A ) != len(_A ): raise ValueError('''Number of doc strings and xtags does not correspond''' ) if len(_A ) != len(_A ): raise ValueError('''Number of doc strings and xsubs does not correspond''' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def UpperCAmelCase__ ( self : int , _A : Tuple , _A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = '''''' for tagname, subs in zip(_A , _A ): xpath += F'''/{tagname}''' if subs != 0: xpath += F'''[{subs}]''' return xpath def __call__( self : Optional[int] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = False # Check that strings has a valid type if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Any = True elif isinstance(_A , (list, tuple) ): if len(_A ) == 0 or isinstance(html_strings[0] , _A ): __SCREAMING_SNAKE_CASE : List[Any] = True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' F'''but is of type {type(_A )}.''' ) __SCREAMING_SNAKE_CASE : Any = bool(isinstance(_A , (list, tuple) ) and (isinstance(html_strings[0] , _A )) ) if not is_batched: __SCREAMING_SNAKE_CASE : Dict = [html_strings] # Get nodes + xpaths __SCREAMING_SNAKE_CASE : str = [] __SCREAMING_SNAKE_CASE : Tuple = [] for html_string in html_strings: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_three_from_single(_A ) nodes.append(_A ) __SCREAMING_SNAKE_CASE : Dict = [] for node, tag_list, sub_list in zip(_A , _A , _A ): __SCREAMING_SNAKE_CASE : List[Any] = self.construct_xpath(_A , _A ) xpath_strings.append(_A ) xpaths.append(_A ) # return as Dict __SCREAMING_SNAKE_CASE : Optional[int] = {'''nodes''': nodes, '''xpaths''': xpaths} 
__SCREAMING_SNAKE_CASE : List[str] = BatchFeature(data=_A , tensor_type=_A ) return encoded_inputs
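The extractor above pairs every text node with parallel tag and subscript lists and then assembles an XPath string from them. A standalone sketch of that assembly step, with made-up inputs:

# Standalone sketch of the XPath string assembly performed above (inputs are made up for illustration).
xpath_tags = ["html", "body", "div", "p"]
xpath_subs = [0, 0, 2, 0]  # 0 means "only sibling with that tag name", otherwise the 1-based sibling index

xpath = ""
for tagname, sub in zip(xpath_tags, xpath_subs):
    xpath += f"/{tagname}"
    if sub != 0:
        xpath += f"[{sub}]"
print(xpath)  # /html/body/div[2]/p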
74
1
import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import librosa import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""): lowercase_ = True from torch.cuda.amp import autocast lowercase_ = logging.getLogger(__name__) @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Whether to log verbose messages or not.'''} , ) lowerCAmelCase_ = field( default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} ) lowerCAmelCase_ = field( default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} ) lowerCAmelCase_ = field( default=0.999995 , metadata={'''help''': '''Decay of gumbel temperature during training.'''} ) def a__ ( snake_case , snake_case ): """simple docstring""" logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) __SCREAMING_SNAKE_CASE : int = logging.WARNING if model_args.verbose_logging: __SCREAMING_SNAKE_CASE : int = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank ): __SCREAMING_SNAKE_CASE : Dict = logging.INFO logger.setLevel(snake_case ) @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) lowerCAmelCase_ = field( default='''train''' , metadata={ '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\'''' } , ) lowerCAmelCase_ = field( default='''validation''' , metadata={ '''help''': ( '''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\'''' ) } , ) lowerCAmelCase_ = field( default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. 
Defaults to \'file\''''} , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) lowerCAmelCase_ = field( default=1 , metadata={ '''help''': '''The percentage of the train set used as validation set in case there\'s no validation split''' } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) lowerCAmelCase_ = field( default=20.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} ) @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 lowerCAmelCase_ = "longest" lowerCAmelCase_ = None lowerCAmelCase_ = None def __call__( self : List[Any] , _A : List[Dict[str, Union[List[int], torch.Tensor]]] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.feature_extractor.pad( _A , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model._get_feat_extract_output_lengths(batch['''input_values'''].shape[-1] ) __SCREAMING_SNAKE_CASE : Any = batch['''input_values'''].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula __SCREAMING_SNAKE_CASE : str = self.model._get_feat_extract_output_lengths(batch['''attention_mask'''].sum(-1 ) ).to( torch.long ) __SCREAMING_SNAKE_CASE : Tuple = torch.zeros( (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['''input_values'''].device ) # these two operations makes sure that all values # before the output lengths indices are attended to __SCREAMING_SNAKE_CASE : int = 1 __SCREAMING_SNAKE_CASE : Union[str, Any] = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool() # sample randomly masked indices __SCREAMING_SNAKE_CASE : Dict = _compute_mask_indices( (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=_A , min_masks=2 , ) return batch class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : str , *_A : Dict , _A : str=1 , _A : int=0 , _A : List[Any]=1.0 , **_A : str ): """simple docstring""" super().__init__(*_A , **_A ) __SCREAMING_SNAKE_CASE : List[str] = 0 __SCREAMING_SNAKE_CASE : List[Any] = max_gumbel_temp __SCREAMING_SNAKE_CASE : List[Any] = min_gumbel_temp __SCREAMING_SNAKE_CASE : Union[str, Any] = gumbel_temp_decay def UpperCAmelCase__ ( self : Optional[Any] , _A : nn.Module , _A : Dict[str, Union[torch.Tensor, Any]] ): """simple docstring""" model.train() __SCREAMING_SNAKE_CASE : Any = self._prepare_inputs(_A ) if self.use_amp: with autocast(): __SCREAMING_SNAKE_CASE : Dict = self.compute_loss(_A , _A ) else: __SCREAMING_SNAKE_CASE : Dict = self.compute_loss(_A , _A ) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": __SCREAMING_SNAKE_CASE : Optional[Any] = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": __SCREAMING_SNAKE_CASE : Union[str, Any] = loss.sum() / (inputs['''mask_time_indices''']).sum() else: raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. 
Choose one of [\'mean\', \'sum\']''' ) if self.args.gradient_accumulation_steps > 1: __SCREAMING_SNAKE_CASE : Dict = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(_A ).backward() elif self.use_apex: with amp.scale_loss(_A , self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(_A ) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) return loss.detach() def a__ ( ): """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __SCREAMING_SNAKE_CASE : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = parser.parse_args_into_dataclasses() configure_logger(snake_case , snake_case ) # Downloading and loading a dataset from the hub. __SCREAMING_SNAKE_CASE : str = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" __SCREAMING_SNAKE_CASE : str = DatasetDict() __SCREAMING_SNAKE_CASE : str = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , ) __SCREAMING_SNAKE_CASE : str = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , ) else: # make sure only "validation" and "train" keys remain" __SCREAMING_SNAKE_CASE : Tuple = DatasetDict() __SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split='''validation''' , cache_dir=model_args.cache_dir , ) __SCREAMING_SNAKE_CASE : Optional[int] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , ) # only normalized-inputs-training is supported __SCREAMING_SNAKE_CASE : Optional[int] = WavaVecaFeatureExtractor.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=snake_case ) def prepare_dataset(snake_case ): # check that all files have the correct sampling rate __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate ) return batch # load audio files into numpy arrays __SCREAMING_SNAKE_CASE : List[Any] = datasets.map( snake_case , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['''train'''].column_names ) # filter audio files that are too long __SCREAMING_SNAKE_CASE : Dict = vectorized_datasets.filter( lambda snake_case : len(data['''speech'''] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) ) def normalize(snake_case ): return feature_extractor(batch['''speech'''] , 
sampling_rate=feature_extractor.sampling_rate ) # normalize and transform to `BatchFeatures` __SCREAMING_SNAKE_CASE : int = vectorized_datasets.map( snake_case , batched=snake_case , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['''train'''].column_names , ) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 __SCREAMING_SNAKE_CASE : Union[str, Any] = WavaVecaConfig.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( '''PreTraining is only supported for ``config.do_stable_layer_norm=True`` and''' ''' ``config.feat_extract_norm=\'layer\'''' ) __SCREAMING_SNAKE_CASE : List[str] = WavaVecaForPreTraining(snake_case ) __SCREAMING_SNAKE_CASE : Optional[Any] = DataCollatorForWavaVecaPretraining(model=snake_case , feature_extractor=snake_case ) __SCREAMING_SNAKE_CASE : Dict = WavaVecaPreTrainer( model=snake_case , data_collator=snake_case , args=snake_case , train_dataset=vectorized_datasets['''train'''] , eval_dataset=vectorized_datasets['''validation'''] , tokenizer=snake_case , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , ) trainer.train() if __name__ == "__main__": main()
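The trainer above anneals the Gumbel-softmax temperature once per update step as max(max_temp * decay**step, min_temp). A small sketch of that schedule, using the defaults declared in the model argument dataclass (2.0 max, 0.5 min, 0.999995 decay):

# Sketch of the Gumbel temperature schedule applied per update step above.
max_gumbel_temp, min_gumbel_temp, gumbel_temp_decay = 2.0, 0.5, 0.999995

for step in (0, 100_000, 300_000, 1_000_000):
    temp = max(max_gumbel_temp * gumbel_temp_decay**step, min_gumbel_temp)
    print(f"step {step:>9}: temperature {temp:.4f}")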
74
import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger() def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case = True ): """simple docstring""" print(F'''Converting {name}...''' ) with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": __SCREAMING_SNAKE_CASE : Tuple = timm.create_model('''levit_128s''' , pretrained=snake_case ) else: __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_128''' , pretrained=snake_case ) if hidden_sizes == 192: __SCREAMING_SNAKE_CASE : Dict = timm.create_model('''levit_192''' , pretrained=snake_case ) if hidden_sizes == 256: __SCREAMING_SNAKE_CASE : Optional[int] = timm.create_model('''levit_256''' , pretrained=snake_case ) if hidden_sizes == 384: __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_384''' , pretrained=snake_case ) from_model.eval() __SCREAMING_SNAKE_CASE : str = LevitForImageClassificationWithTeacher(snake_case ).eval() __SCREAMING_SNAKE_CASE : int = OrderedDict() __SCREAMING_SNAKE_CASE : List[Any] = from_model.state_dict() __SCREAMING_SNAKE_CASE : Tuple = list(from_model.state_dict().keys() ) __SCREAMING_SNAKE_CASE : str = list(our_model.state_dict().keys() ) print(len(snake_case ) , len(snake_case ) ) for i in range(len(snake_case ) ): __SCREAMING_SNAKE_CASE : int = weights[og_keys[i]] our_model.load_state_dict(snake_case ) __SCREAMING_SNAKE_CASE : str = torch.randn((2, 3, 224, 224) ) __SCREAMING_SNAKE_CASE : Tuple = from_model(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = our_model(snake_case ).logits assert torch.allclose(snake_case , snake_case ), "The model logits don't match the original one." 
__SCREAMING_SNAKE_CASE : Union[str, Any] = name print(snake_case ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) __SCREAMING_SNAKE_CASE : Union[str, Any] = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F'''Pushed {checkpoint_name}''' ) def a__ ( snake_case , snake_case = None , snake_case = True ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = '''imagenet-1k-id2label.json''' __SCREAMING_SNAKE_CASE : int = 1_000 __SCREAMING_SNAKE_CASE : Optional[int] = (1, num_labels) __SCREAMING_SNAKE_CASE : Any = '''huggingface/label-files''' __SCREAMING_SNAKE_CASE : Optional[Any] = num_labels __SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='''dataset''' ) , '''r''' ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {int(snake_case ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : str = idalabel __SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : List[str] = partial(snake_case , num_labels=snake_case , idalabel=snake_case , labelaid=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = { '''levit-128S''': 128, '''levit-128''': 128, '''levit-192''': 192, '''levit-256''': 256, '''levit-384''': 384, } __SCREAMING_SNAKE_CASE : Optional[int] = { '''levit-128S''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-128''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-192''': ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-256''': ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-384''': ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , snake_case , names_to_config[model_name] , snake_case , snake_case ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , snake_case , snake_case , snake_case , snake_case ) return config, expected_shape if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""levit-dump-folder/""", type=Path, required=False, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) lowercase_ = parser.parse_args() lowercase_ = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
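The conversion function above transfers weights by walking the source and target state dicts positionally, key by key, then verifies the logits with torch.allclose. A toy sketch of that pattern with two illustrative linear modules (not the Levit models themselves):

import torch
from collections import OrderedDict
from torch import nn

# Toy sketch of the positional key-by-key state-dict copy used above.
source = nn.Linear(4, 2)
target = nn.Linear(4, 2)

new_state = OrderedDict()
src_state = source.state_dict()
src_keys = list(src_state.keys())
dst_keys = list(target.state_dict().keys())
for i in range(len(src_keys)):
    new_state[dst_keys[i]] = src_state[src_keys[i]]

target.load_state_dict(new_state)
x = torch.randn(1, 4)
assert torch.allclose(source(x), target(x))  # identical outputs after the copy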
74
1
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowercase_ = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""") @require_sentencepiece @require_tokenizers class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = PegasusTokenizer lowerCAmelCase_ = PegasusTokenizerFast lowerCAmelCase_ = True lowerCAmelCase_ = True def UpperCAmelCase__ ( self : Any ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __SCREAMING_SNAKE_CASE : Optional[Any] = PegasusTokenizer(_A ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" return PegasusTokenizer.from_pretrained('''google/pegasus-large''' ) def UpperCAmelCase__ ( self : Tuple , **_A : Any ): """simple docstring""" return PegasusTokenizer.from_pretrained(self.tmpdirname , **_A ) def UpperCAmelCase__ ( self : Optional[Any] , _A : Tuple ): """simple docstring""" return ("This is a test", "This is a test") def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = '''</s>''' __SCREAMING_SNAKE_CASE : Optional[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''</s>''' ) self.assertEqual(vocab_keys[-1] , '''v''' ) self.assertEqual(len(_A ) , 1103 ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) __SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname ) __SCREAMING_SNAKE_CASE : Tuple = ( '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important''' ''' </s> <pad> <pad> <pad>''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = rust_tokenizer([raw_input_str] , return_tensors=_A , add_special_tokens=_A ).input_ids[0] __SCREAMING_SNAKE_CASE : Optional[Any] = py_tokenizer([raw_input_str] , return_tensors=_A , add_special_tokens=_A ).input_ids[0] self.assertListEqual(_A , _A ) def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word __SCREAMING_SNAKE_CASE : Dict = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.''' __SCREAMING_SNAKE_CASE : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1] __SCREAMING_SNAKE_CASE : Any = tokenizer([raw_input_str] , return_tensors=_A ).input_ids[0] self.assertListEqual(_A , _A ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id 
== 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 __SCREAMING_SNAKE_CASE : List[str] = '''To ensure a smooth flow of bank resolutions.''' __SCREAMING_SNAKE_CASE : Union[str, Any] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1] __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=_A ).input_ids[0] self.assertListEqual(_A , _A ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = ['''This is going to be way too long.''' * 150, '''short example'''] __SCREAMING_SNAKE_CASE : Dict = ['''not super long but more than 5 tokens''', '''tiny'''] __SCREAMING_SNAKE_CASE : Union[str, Any] = self._large_tokenizer(_A , padding=_A , truncation=_A , return_tensors='''pt''' ) __SCREAMING_SNAKE_CASE : Dict = self._large_tokenizer( text_target=_A , max_length=5 , padding=_A , truncation=_A , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(_A ) == 2 # input_ids, attention_mask. @slow def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = {'''input_ids''': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_A , model_name='''google/bigbird-pegasus-large-arxiv''' , 
revision='''ba85d0851d708441f91440d509690f1ab6353415''' , ) @require_sentencepiece @require_tokenizers class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = PegasusTokenizer lowerCAmelCase_ = PegasusTokenizerFast lowerCAmelCase_ = True lowerCAmelCase_ = True def UpperCAmelCase__ ( self : Any ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __SCREAMING_SNAKE_CASE : int = PegasusTokenizer(_A , offset=0 , mask_token_sent=_A , mask_token='''[MASK]''' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' ) def UpperCAmelCase__ ( self : Dict , **_A : Tuple ): """simple docstring""" return PegasusTokenizer.from_pretrained(self.tmpdirname , **_A ) def UpperCAmelCase__ ( self : Optional[Any] , _A : Union[str, Any] ): """simple docstring""" return ("This is a test", "This is a test") def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) __SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname ) __SCREAMING_SNAKE_CASE : Tuple = ( '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>''' ''' <pad> <pad> <pad>''' ) __SCREAMING_SNAKE_CASE : Any = rust_tokenizer([raw_input_str] , return_tensors=_A , add_special_tokens=_A ).input_ids[0] __SCREAMING_SNAKE_CASE : Tuple = py_tokenizer([raw_input_str] , return_tensors=_A , add_special_tokens=_A ).input_ids[0] self.assertListEqual(_A , _A ) @require_torch def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = ['''This is going to be way too long.''' * 1000, '''short example'''] __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''not super long but more than 5 tokens''', '''tiny'''] __SCREAMING_SNAKE_CASE : Optional[Any] = self._large_tokenizer(_A , padding=_A , truncation=_A , return_tensors='''pt''' ) __SCREAMING_SNAKE_CASE : Dict = self._large_tokenizer( text_target=_A , max_length=5 , padding=_A , truncation=_A , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(_A ) == 2 # input_ids, attention_mask. def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = ( '''This is an example string that is used to test the original TF implementation against the HF''' ''' implementation''' ) __SCREAMING_SNAKE_CASE : Dict = self._large_tokenizer(_A ).input_ids self.assertListEqual( _A , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
74
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

lowercase_ = {
    """configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        """FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """FalconForCausalLM""",
        """FalconModel""",
        """FalconPreTrainedModel""",
        """FalconForSequenceClassification""",
        """FalconForTokenClassification""",
        """FalconForQuestionAnswering""",
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )
else:
    import sys

    lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
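The module above registers an import structure and defers the heavy torch imports through _LazyModule. A generic sketch of the same deferral idea using a module-level __getattr__ (PEP 562) rather than the transformers helper; the mapped module and attributes are illustrative:

import importlib

# Generic sketch of lazy attribute resolution for a package __init__ (illustrative mapping).
_import_structure = {"json": ["dumps", "loads"]}

def __getattr__(name):
    for module_name, attributes in _import_structure.items():
        if name in attributes:
            module = importlib.import_module(module_name)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")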
74
1
import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger() def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case = True ): """simple docstring""" print(F'''Converting {name}...''' ) with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": __SCREAMING_SNAKE_CASE : Tuple = timm.create_model('''levit_128s''' , pretrained=snake_case ) else: __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_128''' , pretrained=snake_case ) if hidden_sizes == 192: __SCREAMING_SNAKE_CASE : Dict = timm.create_model('''levit_192''' , pretrained=snake_case ) if hidden_sizes == 256: __SCREAMING_SNAKE_CASE : Optional[int] = timm.create_model('''levit_256''' , pretrained=snake_case ) if hidden_sizes == 384: __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_384''' , pretrained=snake_case ) from_model.eval() __SCREAMING_SNAKE_CASE : str = LevitForImageClassificationWithTeacher(snake_case ).eval() __SCREAMING_SNAKE_CASE : int = OrderedDict() __SCREAMING_SNAKE_CASE : List[Any] = from_model.state_dict() __SCREAMING_SNAKE_CASE : Tuple = list(from_model.state_dict().keys() ) __SCREAMING_SNAKE_CASE : str = list(our_model.state_dict().keys() ) print(len(snake_case ) , len(snake_case ) ) for i in range(len(snake_case ) ): __SCREAMING_SNAKE_CASE : int = weights[og_keys[i]] our_model.load_state_dict(snake_case ) __SCREAMING_SNAKE_CASE : str = torch.randn((2, 3, 224, 224) ) __SCREAMING_SNAKE_CASE : Tuple = from_model(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = our_model(snake_case ).logits assert torch.allclose(snake_case , snake_case ), "The model logits don't match the original one." 
__SCREAMING_SNAKE_CASE : Union[str, Any] = name print(snake_case ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) __SCREAMING_SNAKE_CASE : Union[str, Any] = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F'''Pushed {checkpoint_name}''' ) def a__ ( snake_case , snake_case = None , snake_case = True ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = '''imagenet-1k-id2label.json''' __SCREAMING_SNAKE_CASE : int = 1_000 __SCREAMING_SNAKE_CASE : Optional[int] = (1, num_labels) __SCREAMING_SNAKE_CASE : Any = '''huggingface/label-files''' __SCREAMING_SNAKE_CASE : Optional[Any] = num_labels __SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='''dataset''' ) , '''r''' ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {int(snake_case ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : str = idalabel __SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : List[str] = partial(snake_case , num_labels=snake_case , idalabel=snake_case , labelaid=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = { '''levit-128S''': 128, '''levit-128''': 128, '''levit-192''': 192, '''levit-256''': 256, '''levit-384''': 384, } __SCREAMING_SNAKE_CASE : Optional[int] = { '''levit-128S''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-128''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-192''': ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-256''': ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-384''': ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , snake_case , names_to_config[model_name] , snake_case , snake_case ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , snake_case , snake_case , snake_case , snake_case ) return config, expected_shape if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""levit-dump-folder/""", type=Path, required=False, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) lowercase_ = parser.parse_args() lowercase_ = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
74
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging lowercase_ = logging.get_logger(__name__) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = set() __SCREAMING_SNAKE_CASE : str = [] def parse_line(snake_case ): for line in fp: if isinstance(snake_case , snake_case ): __SCREAMING_SNAKE_CASE : List[Any] = line.decode('''UTF-8''' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(''' ''' ): # process a single warning and move it to `selected_warnings`. if len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : List[Any] = '''\n'''.join(snake_case ) # Only keep the warnings specified in `targets` if any(F''': {x}: ''' in warning for x in targets ): selected_warnings.add(snake_case ) buffer.clear() continue else: __SCREAMING_SNAKE_CASE : int = line.strip() buffer.append(snake_case ) if from_gh: for filename in os.listdir(snake_case ): __SCREAMING_SNAKE_CASE : Any = os.path.join(snake_case , snake_case ) if not os.path.isdir(snake_case ): # read the file if filename != "warnings.txt": continue with open(snake_case ) as fp: parse_line(snake_case ) else: try: with zipfile.ZipFile(snake_case ) as z: for filename in z.namelist(): if not os.path.isdir(snake_case ): # read the file if filename != "warnings.txt": continue with z.open(snake_case ) as fp: parse_line(snake_case ) except Exception: logger.warning( F'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' ) return selected_warnings def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = set() __SCREAMING_SNAKE_CASE : List[Any] = [os.path.join(snake_case , snake_case ) for p in os.listdir(snake_case ) if (p.endswith('''.zip''' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(snake_case , snake_case ) ) return selected_warnings if __name__ == "__main__": def a__ ( snake_case ): """simple docstring""" return values.split(''',''' ) lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") # optional parameters parser.add_argument( """--targets""", default="""DeprecationWarning,UserWarning,FutureWarning""", type=list_str, help="""Comma-separated list of target warning(s) which we want to extract.""", ) parser.add_argument( """--from_gh""", action="""store_true""", help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""", ) lowercase_ = parser.parse_args() lowercase_ = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links lowercase_ = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("""=""" * 
80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts lowercase_ = extract_warnings(args.output_dir, args.targets) lowercase_ = sorted(selected_warnings) with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
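The extraction script above keeps a buffered warning block only when it names one of the requested warning categories. A standalone sketch of that filter on a made-up pytest-style warning line:

# Standalone sketch of the "keep only targeted warning categories" check used above (example text is made up).
targets = ["DeprecationWarning", "UserWarning", "FutureWarning"]
warning_block = "test.py:12: DeprecationWarning: this API is deprecated"

if any(f": {x}: " in warning_block for x in targets):
    print("selected:", warning_block)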
74
1
from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class __UpperCamelCase : """simple docstring""" def __init__( self : Optional[int] , _A : List[str] , ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = parent __SCREAMING_SNAKE_CASE : int = 13 __SCREAMING_SNAKE_CASE : Any = 7 __SCREAMING_SNAKE_CASE : List[str] = 30 __SCREAMING_SNAKE_CASE : List[Any] = self.seq_length + self.mem_len __SCREAMING_SNAKE_CASE : Union[str, Any] = 15 __SCREAMING_SNAKE_CASE : Any = True __SCREAMING_SNAKE_CASE : Dict = True __SCREAMING_SNAKE_CASE : Union[str, Any] = 99 __SCREAMING_SNAKE_CASE : Optional[Any] = [10, 50, 80] __SCREAMING_SNAKE_CASE : int = 32 __SCREAMING_SNAKE_CASE : int = 32 __SCREAMING_SNAKE_CASE : Dict = 4 __SCREAMING_SNAKE_CASE : Union[str, Any] = 8 __SCREAMING_SNAKE_CASE : Union[str, Any] = 128 __SCREAMING_SNAKE_CASE : Optional[int] = 2 __SCREAMING_SNAKE_CASE : Optional[Any] = 2 __SCREAMING_SNAKE_CASE : Optional[int] = None __SCREAMING_SNAKE_CASE : Dict = 1 __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 __SCREAMING_SNAKE_CASE : int = 3 __SCREAMING_SNAKE_CASE : Union[str, Any] = self.vocab_size - 1 __SCREAMING_SNAKE_CASE : Optional[Any] = 0.01 def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE : List[Any] = None if self.use_labels: __SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE : List[str] = TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def UpperCAmelCase__ ( self : int ): """simple docstring""" random.seed(self.seed ) tf.random.set_seed(self.seed ) def UpperCAmelCase__ ( self : List[str] , _A : Dict , _A : str , _A : Any , _A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = TFTransfoXLModel(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = model(_A ).to_tuple() __SCREAMING_SNAKE_CASE : Tuple = {'''input_ids''': input_ids_a, '''mems''': mems_a} __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = model(_A ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape 
for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def UpperCAmelCase__ ( self : Dict , _A : int , _A : List[str] , _A : int , _A : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = TFTransfoXLLMHeadModel(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = model(_A ).to_tuple() __SCREAMING_SNAKE_CASE : Any = {'''input_ids''': input_ids_a, '''labels''': lm_labels} __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = model(_A ).to_tuple() __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = model([input_ids_a, mems_a] ).to_tuple() __SCREAMING_SNAKE_CASE : int = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels} __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = model(_A ).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def UpperCAmelCase__ ( self : Dict , _A : Tuple , _A : int , _A : Dict , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = TFTransfoXLForSequenceClassification(_A ) __SCREAMING_SNAKE_CASE : int = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.prepare_config_and_inputs() ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Optional[Any] = config_and_inputs __SCREAMING_SNAKE_CASE : List[Any] = {'''input_ids''': input_ids_a} return config, inputs_dict @require_tf class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) lowerCAmelCase_ = () if is_tf_available() else () lowerCAmelCase_ = ( { '''feature-extraction''': TFTransfoXLModel, '''text-classification''': TFTransfoXLForSequenceClassification, '''text-generation''': TFTransfoXLLMHeadModel, '''zero-shot''': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def UpperCAmelCase__ ( self : Dict , _A : Union[str, Any] , _A : List[Any] , _A : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any] ): """simple docstring""" if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = TFTransfoXLModelTester(self ) __SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self , config_class=_A , d_embed=37 ) def UpperCAmelCase__ ( self : int ): """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" self.model_tester.set_seed() __SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*_A ) def UpperCAmelCase__ ( self : int ): """simple docstring""" self.model_tester.set_seed() __SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*_A ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE : Optional[int] = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE : Dict = model_class(_A ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: __SCREAMING_SNAKE_CASE : List[Any] = model.get_output_embeddings() assert isinstance(_A , tf.keras.layers.Layer ) __SCREAMING_SNAKE_CASE : Any = model.get_bias() assert name is None else: __SCREAMING_SNAKE_CASE : Optional[Any] = model.get_output_embeddings() assert x is None __SCREAMING_SNAKE_CASE : Dict = model.get_bias() assert name is None def UpperCAmelCase__ ( self : int ): """simple docstring""" pass @slow def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE : Optional[Any] = TFTransfoXLModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' ) def UpperCAmelCase__ ( self : int ): """simple docstring""" pass @require_tf class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" @unittest.skip('''Skip test until #12651 is resolved.''' ) @slow def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' ) # fmt: off __SCREAMING_SNAKE_CASE : Union[str, Any] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 
1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off __SCREAMING_SNAKE_CASE : List[Any] = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> __SCREAMING_SNAKE_CASE : Optional[int] = model.generate(_A , max_length=200 , do_sample=_A ) self.assertListEqual(output_ids[0].numpy().tolist() , _A )
74
from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = 42 class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" @register_to_config def __init__( self : Dict , _A : int = 16 , _A : int = 88 , _A : Optional[int] = None , _A : Optional[int] = None , _A : int = 1 , _A : float = 0.0 , _A : int = 32 , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : str = "geglu" , _A : bool = True , _A : bool = True , ): """simple docstring""" super().__init__() __SCREAMING_SNAKE_CASE : Dict = num_attention_heads __SCREAMING_SNAKE_CASE : Optional[int] = attention_head_dim __SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads * attention_head_dim __SCREAMING_SNAKE_CASE : Tuple = in_channels __SCREAMING_SNAKE_CASE : str = torch.nn.GroupNorm(num_groups=_A , num_channels=_A , eps=1e-6 , affine=_A ) __SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(_A , _A ) # 3. Define transformers blocks __SCREAMING_SNAKE_CASE : List[Any] = nn.ModuleList( [ BasicTransformerBlock( _A , _A , _A , dropout=_A , cross_attention_dim=_A , activation_fn=_A , attention_bias=_A , double_self_attention=_A , norm_elementwise_affine=_A , ) for d in range(_A ) ] ) __SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(_A , _A ) def UpperCAmelCase__ ( self : str , _A : Dict , _A : int=None , _A : Tuple=None , _A : Dict=None , _A : List[Any]=1 , _A : Union[str, Any]=None , _A : bool = True , ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = hidden_states.shape __SCREAMING_SNAKE_CASE : Any = batch_frames // num_frames __SCREAMING_SNAKE_CASE : Dict = hidden_states __SCREAMING_SNAKE_CASE : str = hidden_states[None, :].reshape(_A , _A , _A , _A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = hidden_states.permute(0 , 2 , 1 , 3 , 4 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self.norm(_A ) __SCREAMING_SNAKE_CASE : List[str] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , _A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = self.proj_in(_A ) # 2. Blocks for block in self.transformer_blocks: __SCREAMING_SNAKE_CASE : Optional[Any] = block( _A , encoder_hidden_states=_A , timestep=_A , cross_attention_kwargs=_A , class_labels=_A , ) # 3. Output __SCREAMING_SNAKE_CASE : Any = self.proj_out(_A ) __SCREAMING_SNAKE_CASE : List[str] = ( hidden_states[None, None, :] .reshape(_A , _A , _A , _A , _A ) .permute(0 , 3 , 4 , 1 , 2 ) .contiguous() ) __SCREAMING_SNAKE_CASE : Optional[Any] = hidden_states.reshape(_A , _A , _A , _A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=_A )
74
1
import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = 42 lowerCAmelCase_ = None def a__ ( snake_case , snake_case=0.999 , snake_case="cosine" , ): """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(snake_case ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(snake_case ): return math.exp(t * -12.0 ) else: raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) __SCREAMING_SNAKE_CASE : Any = [] for i in range(snake_case ): __SCREAMING_SNAKE_CASE : int = i / num_diffusion_timesteps __SCREAMING_SNAKE_CASE : List[str] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(snake_case ) / alpha_bar_fn(snake_case ) , snake_case ) ) return torch.tensor(snake_case , dtype=torch.floataa ) class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" @register_to_config def __init__( self : List[Any] , _A : int = 1000 , _A : str = "fixed_small_log" , _A : bool = True , _A : Optional[float] = 1.0 , _A : str = "epsilon" , _A : str = "squaredcos_cap_v2" , ): """simple docstring""" if beta_schedule != "squaredcos_cap_v2": raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' ) __SCREAMING_SNAKE_CASE : List[Any] = betas_for_alpha_bar(_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = 1.0 - self.betas __SCREAMING_SNAKE_CASE : List[Any] = torch.cumprod(self.alphas , dim=0 ) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(1.0 ) # standard deviation of the initial noise distribution __SCREAMING_SNAKE_CASE : Tuple = 1.0 # setable values __SCREAMING_SNAKE_CASE : Any = None __SCREAMING_SNAKE_CASE : Any = torch.from_numpy(np.arange(0 , _A )[::-1].copy() ) __SCREAMING_SNAKE_CASE : Tuple = variance_type def UpperCAmelCase__ ( self : int , _A : torch.FloatTensor , _A : Optional[int] = None ): """simple docstring""" return sample def UpperCAmelCase__ ( self : int , _A : int , _A : Union[str, torch.device] = None ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = num_inference_steps __SCREAMING_SNAKE_CASE : Dict = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) __SCREAMING_SNAKE_CASE : Optional[Any] = (np.arange(0 , _A ) * step_ratio).round()[::-1].copy().astype(np.intaa ) __SCREAMING_SNAKE_CASE : Optional[int] = torch.from_numpy(_A ).to(_A ) def UpperCAmelCase__ ( self : List[str] , _A : List[Any] , _A : List[str]=None , _A : List[Any]=None , _A : List[str]=None ): """simple docstring""" if prev_timestep is None: __SCREAMING_SNAKE_CASE : Optional[int] = t - 1 __SCREAMING_SNAKE_CASE : str = self.alphas_cumprod[t] __SCREAMING_SNAKE_CASE : Tuple = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one __SCREAMING_SNAKE_CASE : Dict = 1 - alpha_prod_t __SCREAMING_SNAKE_CASE : Any = 1 - alpha_prod_t_prev if prev_timestep == t - 1: __SCREAMING_SNAKE_CASE : List[Any] = self.betas[t] else: __SCREAMING_SNAKE_CASE : List[str] = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from 
it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample __SCREAMING_SNAKE_CASE : Dict = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: __SCREAMING_SNAKE_CASE : Any = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": __SCREAMING_SNAKE_CASE : Any = torch.log(torch.clamp(_A , min=1e-20 ) ) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler __SCREAMING_SNAKE_CASE : Optional[Any] = variance.log() __SCREAMING_SNAKE_CASE : Optional[int] = beta.log() __SCREAMING_SNAKE_CASE : Dict = (predicted_variance + 1) / 2 __SCREAMING_SNAKE_CASE : str = frac * max_log + (1 - frac) * min_log return variance def UpperCAmelCase__ ( self : Dict , _A : torch.FloatTensor , _A : int , _A : torch.FloatTensor , _A : Optional[int] = None , _A : List[str]=None , _A : bool = True , ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = torch.split(_A , sample.shape[1] , dim=1 ) else: __SCREAMING_SNAKE_CASE : Tuple = None # 1. compute alphas, betas if prev_timestep is None: __SCREAMING_SNAKE_CASE : List[str] = t - 1 __SCREAMING_SNAKE_CASE : Any = self.alphas_cumprod[t] __SCREAMING_SNAKE_CASE : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one __SCREAMING_SNAKE_CASE : int = 1 - alpha_prod_t __SCREAMING_SNAKE_CASE : Optional[Any] = 1 - alpha_prod_t_prev if prev_timestep == t - 1: __SCREAMING_SNAKE_CASE : str = self.betas[t] __SCREAMING_SNAKE_CASE : List[Any] = self.alphas[t] else: __SCREAMING_SNAKE_CASE : Tuple = 1 - alpha_prod_t / alpha_prod_t_prev __SCREAMING_SNAKE_CASE : Any = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": __SCREAMING_SNAKE_CASE : int = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": __SCREAMING_SNAKE_CASE : List[str] = model_output else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`''' ''' for the UnCLIPScheduler.''' ) # 3. Clip "predicted x_0" if self.config.clip_sample: __SCREAMING_SNAKE_CASE : Dict = torch.clamp( _A , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf __SCREAMING_SNAKE_CASE : Dict = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t __SCREAMING_SNAKE_CASE : Tuple = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf __SCREAMING_SNAKE_CASE : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise __SCREAMING_SNAKE_CASE : List[str] = 0 if t > 0: __SCREAMING_SNAKE_CASE : Optional[int] = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=_A , device=model_output.device ) __SCREAMING_SNAKE_CASE : Any = self._get_variance( _A , predicted_variance=_A , prev_timestep=_A , ) if self.variance_type == "fixed_small_log": __SCREAMING_SNAKE_CASE : List[Any] = variance elif self.variance_type == "learned_range": __SCREAMING_SNAKE_CASE : str = (0.5 * variance).exp() else: raise ValueError( F'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`''' ''' for the UnCLIPScheduler.''' ) __SCREAMING_SNAKE_CASE : List[Any] = variance * variance_noise __SCREAMING_SNAKE_CASE : List[str] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=_A , pred_original_sample=_A ) def UpperCAmelCase__ ( self : str , _A : torch.FloatTensor , _A : torch.FloatTensor , _A : torch.IntTensor , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) __SCREAMING_SNAKE_CASE : List[Any] = timesteps.to(original_samples.device ) __SCREAMING_SNAKE_CASE : Union[str, Any] = alphas_cumprod[timesteps] ** 0.5 __SCREAMING_SNAKE_CASE : Dict = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): __SCREAMING_SNAKE_CASE : Any = sqrt_alpha_prod.unsqueeze(-1 ) __SCREAMING_SNAKE_CASE : List[Any] = (1 - alphas_cumprod[timesteps]) ** 0.5 __SCREAMING_SNAKE_CASE : Any = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): __SCREAMING_SNAKE_CASE : Optional[Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) __SCREAMING_SNAKE_CASE : Optional[int] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
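# A minimal denoising-loop sketch for the scheduler defined above. It assumes the
# class is the UnCLIPScheduler exported by diffusers (as the copied-from comments
# suggest); `fake_model` is an illustrative stand-in for a trained
# epsilon-predicting network, not part of the original file.
import torch

from diffusers import UnCLIPScheduler


def fake_model(x: torch.Tensor, t: int) -> torch.Tensor:
    # placeholder for a real noise-prediction model
    return torch.randn_like(x)


scheduler = UnCLIPScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=25)

sample = torch.randn(1, 3, 64, 64)  # start from pure Gaussian noise
for t in scheduler.timesteps:
    model_output = fake_model(sample, int(t))
    sample = scheduler.step(model_output, int(t), sample).prev_sample
print(sample.shape)  # torch.Size([1, 3, 64, 64])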
74
import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py lowercase_ = """src/diffusers""" lowercase_ = """.""" # This is to make sure the diffusers module imported is the one in the repo. lowercase_ = importlib.util.spec_from_file_location( """diffusers""", os.path.join(DIFFUSERS_PATH, """__init__.py"""), submodule_search_locations=[DIFFUSERS_PATH], ) lowercase_ = spec.loader.load_module() def a__ ( snake_case , snake_case ): """simple docstring""" return line.startswith(snake_case ) or len(snake_case ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , snake_case ) is not None def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = object_name.split('''.''' ) __SCREAMING_SNAKE_CASE : str = 0 # First let's find the module where our object lives. __SCREAMING_SNAKE_CASE : Any = parts[i] while i < len(snake_case ) and not os.path.isfile(os.path.join(snake_case , F'''{module}.py''' ) ): i += 1 if i < len(snake_case ): __SCREAMING_SNAKE_CASE : str = os.path.join(snake_case , parts[i] ) if i >= len(snake_case ): raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' ) with open(os.path.join(snake_case , F'''{module}.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : Dict = f.readlines() # Now let's find the class / func in the code! __SCREAMING_SNAKE_CASE : Union[str, Any] = '''''' __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for name in parts[i + 1 :]: while ( line_index < len(snake_case ) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(snake_case ): raise ValueError(F''' {object_name} does not match any function or class in {module}.''' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). __SCREAMING_SNAKE_CASE : List[Any] = line_index while line_index < len(snake_case ) and _should_continue(lines[line_index] , snake_case ): line_index += 1 # Clean up empty lines at the end (if any). 
while len(lines[line_index - 1] ) <= 1: line_index -= 1 __SCREAMING_SNAKE_CASE : Dict = lines[start_index:line_index] return "".join(snake_case ) lowercase_ = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""") lowercase_ = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""") lowercase_ = re.compile(R"""<FILL\s+[^>]*>""") def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = code.split('''\n''' ) __SCREAMING_SNAKE_CASE : Dict = 0 while idx < len(snake_case ) and len(lines[idx] ) == 0: idx += 1 if idx < len(snake_case ): return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0] return "" def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = len(get_indent(snake_case ) ) > 0 if has_indent: __SCREAMING_SNAKE_CASE : List[Any] = F'''class Bla:\n{code}''' __SCREAMING_SNAKE_CASE : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=snake_case ) __SCREAMING_SNAKE_CASE : Optional[int] = black.format_str(snake_case , mode=snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = style_docstrings_in_code(snake_case ) return result[len('''class Bla:\n''' ) :] if has_indent else result def a__ ( snake_case , snake_case=False ): """simple docstring""" with open(snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : List[str] = f.readlines() __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : int = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(snake_case ): __SCREAMING_SNAKE_CASE : Dict = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = search.groups() __SCREAMING_SNAKE_CASE : int = find_code_in_diffusers(snake_case ) __SCREAMING_SNAKE_CASE : str = get_indent(snake_case ) __SCREAMING_SNAKE_CASE : Any = line_index + 1 if indent == theoretical_indent else line_index + 2 __SCREAMING_SNAKE_CASE : Dict = theoretical_indent __SCREAMING_SNAKE_CASE : Optional[int] = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. __SCREAMING_SNAKE_CASE : List[Any] = True while line_index < len(snake_case ) and should_continue: line_index += 1 if line_index >= len(snake_case ): break __SCREAMING_SNAKE_CASE : Any = lines[line_index] __SCREAMING_SNAKE_CASE : Optional[Any] = _should_continue(snake_case , snake_case ) and re.search(F'''^{indent}# End copy''' , snake_case ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 __SCREAMING_SNAKE_CASE : List[str] = lines[start_index:line_index] __SCREAMING_SNAKE_CASE : Dict = ''''''.join(snake_case ) # Remove any nested `Copied from` comments to avoid circular copies __SCREAMING_SNAKE_CASE : Tuple = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(snake_case ) is None] __SCREAMING_SNAKE_CASE : Union[str, Any] = '''\n'''.join(snake_case ) # Before comparing, use the `replace_pattern` on the original code. 
if len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : Union[str, Any] = replace_pattern.replace('''with''' , '''''' ).split(''',''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = [_re_replace_pattern.search(snake_case ) for p in patterns] for pattern in patterns: if pattern is None: continue __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = pattern.groups() __SCREAMING_SNAKE_CASE : str = re.sub(snake_case , snake_case , snake_case ) if option.strip() == "all-casing": __SCREAMING_SNAKE_CASE : Optional[Any] = re.sub(obja.lower() , obja.lower() , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = re.sub(obja.upper() , obja.upper() , snake_case ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line __SCREAMING_SNAKE_CASE : Optional[Any] = blackify(lines[start_index - 1] + theoretical_code ) __SCREAMING_SNAKE_CASE : int = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: __SCREAMING_SNAKE_CASE : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:] __SCREAMING_SNAKE_CASE : str = start_index + 1 if overwrite and len(snake_case ) > 0: # Warn the user a file has been modified. print(F'''Detected changes, rewriting {filename}.''' ) with open(snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(snake_case ) return diffs def a__ ( snake_case = False ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = glob.glob(os.path.join(snake_case , '''**/*.py''' ) , recursive=snake_case ) __SCREAMING_SNAKE_CASE : Tuple = [] for filename in all_files: __SCREAMING_SNAKE_CASE : int = is_copy_consistent(snake_case , snake_case ) diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs] if not overwrite and len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : Optional[int] = '''\n'''.join(snake_case ) raise Exception( '''Found the following copy inconsistencies:\n''' + diff + '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") lowercase_ = parser.parse_args() check_copies(args.fix_and_overwrite)
74
1
import math


def is_prime(number: int) -> bool:
    """Trial-division primality test over candidates of the form 6k +/- 1."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Return the side length of the first odd square spiral whose diagonals
    contain a fraction of primes below ``ratio``."""
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        # The three new corners of the (j + 2)-sided spiral, skipping the
        # bottom-right corner (j + 2) ** 2, which is never prime.
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
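# Worked example for the two helpers above (using the cleaned-up names
# is_prime / solution); the expected value was traced by hand from the loop,
# so treat it as illustrative rather than authoritative.
assert is_prime(2) and is_prime(17) and not is_prime(21)
# With a 50% threshold, the diagonals of the 11x11 spiral are the first to hold
# fewer than half primes (10 of 21 entries, ~47.6%), so this returns 11.
assert solution(0.5) == 11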
74
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" super().tearDown() gc.collect() def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained( '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE : int = jax.device_count() __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt] __SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE : Tuple = replicate(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = shard(_A ) __SCREAMING_SNAKE_CASE : Dict = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE : Optional[int] = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE : str = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE : Tuple = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = '''stabilityai/stable-diffusion-2''' __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = FlaxDPMSolverMultistepScheduler.from_pretrained(_A , subfolder='''scheduler''' ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = FlaxStableDiffusionPipeline.from_pretrained( _A , scheduler=_A , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE : List[str] = scheduler_params __SCREAMING_SNAKE_CASE : Tuple = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE : List[Any] = jax.device_count() __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt] __SCREAMING_SNAKE_CASE : Any = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = replicate(_A ) __SCREAMING_SNAKE_CASE : List[str] = shard(_A ) __SCREAMING_SNAKE_CASE : int = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE : Dict = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] ) print(F'''output_slice: {output_slice}''' ) 
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
74
1
def binary_multiply(a: int, b: int) -> int:
    """Multiply two integers by repeated doubling (Russian peasant multiplication)."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    """Multiply ``a`` by ``b`` modulo ``c``, reducing after every addition so the
    intermediate sums stay small."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
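# Quick sanity checks for the helpers above (cleaned-up names binary_multiply /
# binary_mod_multiply); plain Python arithmetic serves as the reference.
assert binary_multiply(6, 7) == 42
assert binary_multiply(13, 0) == 0
assert binary_mod_multiply(6, 7, 5) == (6 * 7) % 5  # == 2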
74
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) lowercase_ = { """configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""], """processing_layoutlmv2""": ["""LayoutLMv2Processor"""], """tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""LayoutLMv2TokenizerFast"""] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""LayoutLMv2FeatureExtractor"""] lowercase_ = ["""LayoutLMv2ImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""", """LayoutLMv2ForQuestionAnswering""", """LayoutLMv2ForSequenceClassification""", """LayoutLMv2ForTokenClassification""", """LayoutLMv2Layer""", """LayoutLMv2Model""", """LayoutLMv2PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
1
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''''' lowerCAmelCase_ = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) lowerCAmelCase_ = None # compression type in fsspec. ex: "gzip" lowerCAmelCase_ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self : str , _A : str = "" , _A : Optional[str] = None , _A : Optional[dict] = None , **_A : List[Any] ): """simple docstring""" super().__init__(self , **_A ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode __SCREAMING_SNAKE_CASE : Tuple = fsspec.open( _A , mode='''rb''' , protocol=_A , compression=self.compression , client_kwargs={ '''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459 '''trust_env''': True, # Enable reading proxy env variables. **(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) __SCREAMING_SNAKE_CASE : Optional[Any] = os.path.basename(self.file.path.split('''::''' )[0] ) __SCREAMING_SNAKE_CASE : Optional[int] = ( self.compressed_name[: self.compressed_name.rindex('''.''' )] if '''.''' in self.compressed_name else self.compressed_name ) __SCREAMING_SNAKE_CASE : Any = None @classmethod def UpperCAmelCase__ ( cls : List[str] , _A : Optional[int] ): """simple docstring""" return super()._strip_protocol(_A ).lstrip('''/''' ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" if self.dir_cache is None: __SCREAMING_SNAKE_CASE : List[str] = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name} __SCREAMING_SNAKE_CASE : Union[str, Any] = {f['''name''']: f} def UpperCAmelCase__ ( self : Dict , _A : str ): """simple docstring""" return self.file.open().read() def UpperCAmelCase__ ( self : Any , _A : str , _A : str = "rb" , _A : Tuple=None , _A : Union[str, Any]=True , _A : Dict=None , **_A : List[Any] , ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self._strip_protocol(_A ) if mode != "rb": raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' ) return self.file.open() class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''bz2''' lowerCAmelCase_ = '''bz2''' lowerCAmelCase_ = '''.bz2''' class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''gzip''' lowerCAmelCase_ = '''gzip''' lowerCAmelCase_ = '''.gz''' class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''lz4''' lowerCAmelCase_ = '''lz4''' lowerCAmelCase_ = '''.lz4''' class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''xz''' lowerCAmelCase_ = '''xz''' lowerCAmelCase_ = '''.xz''' class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''zstd''' lowerCAmelCase_ = '''zstd''' lowerCAmelCase_ = '''.zst''' def __init__( self : str , _A : str , _A : str = "rb" , _A : Optional[str] = None , _A : Optional[dict] = None , _A : int = DEFAULT_BLOCK_SIZE , **_A : str , ): """simple docstring""" super().__init__( fo=_A , mode=_A , target_protocol=_A , target_options=_A , block_size=_A , **_A , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and 
zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 __SCREAMING_SNAKE_CASE : Optional[Any] = self.file.__enter__ class __UpperCamelCase : """simple docstring""" def __init__( self : int , _A : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = file_ def __enter__( self : int ): """simple docstring""" self._file.__enter__() return self def __exit__( self : Any , *_A : int , **_A : List[Any] ): """simple docstring""" self._file.__exit__(*_A , **_A ) def __iter__( self : List[Any] ): """simple docstring""" return iter(self._file ) def UpperCAmelCase__ ( self : int ): """simple docstring""" return next(self._file ) def __getattr__( self : Dict , _A : Optional[int] ): """simple docstring""" return getattr(self._file , _A ) def fixed_enter(*_A : List[str] , **_A : str ): return WrappedFile(_enter(*_A , **_A ) ) __SCREAMING_SNAKE_CASE : Optional[int] = fixed_enter
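# A small sketch of how these single-file compression filesystems are used through
# fsspec URL chaining (the "gzip://file.txt::..." form comes from the class comment
# above). It assumes the protocols are registered with fsspec, which importing
# `datasets` normally does at import time; the file name is illustrative only.
import gzip

import datasets  # noqa: F401  (importing registers the compression protocols)
import fsspec

with gzip.open("example.txt.gz", "wt") as f:
    f.write("hello from a gzip archive\n")

with fsspec.open("gzip://example.txt::example.txt.gz", "rb") as f:
    print(f.read())  # b'hello from a gzip archive\n'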
74
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = MobileBertTokenizer lowerCAmelCase_ = MobileBertTokenizerFast lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = filter_non_english lowerCAmelCase_ = '''google/mobilebert-uncased''' def UpperCAmelCase__ ( self : Dict ): """simple docstring""" super().setUp() __SCREAMING_SNAKE_CASE : List[str] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __SCREAMING_SNAKE_CASE : int = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : List[str] = '''unwanted, running''' return input_text, output_text def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class(self.vocab_file ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(_A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] ) def UpperCAmelCase__ ( self : int ): """simple docstring""" if not self.test_rust_tokenizer: return __SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : str = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : str = tokenizer.encode(_A ) __SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) # With lower casing __SCREAMING_SNAKE_CASE : Any = self.get_tokenizer(do_lower_case=_A ) __SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer(do_lower_case=_A ) __SCREAMING_SNAKE_CASE : List[str] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) 
__SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
[UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] __SCREAMING_SNAKE_CASE : Dict = {} for i, token in enumerate(_A ): __SCREAMING_SNAKE_CASE : List[str] = i __SCREAMING_SNAKE_CASE : str = WordpieceTokenizer(vocab=_A , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def UpperCAmelCase__ ( self : str ): """simple docstring""" self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) self.assertListEqual( [rust_tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) @slow def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : str = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r.encode_plus( _A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , 
add_special_tokens=_A , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.do_lower_case if hasattr(_A , '''do_lower_case''' ) else False __SCREAMING_SNAKE_CASE : Optional[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''A'''), ((1, 2), ''','''), ((3, 5), '''na'''), ((5, 6), '''##ï'''), ((6, 8), '''##ve'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''Allen'''), ((21, 23), '''##NL'''), ((23, 24), '''##P'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''a'''), ((1, 2), ''','''), ((3, 8), '''naive'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''allen'''), ((21, 23), '''##nl'''), ((23, 24), '''##p'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = ['''的''', '''人''', '''有'''] __SCREAMING_SNAKE_CASE : int = ''''''.join(_A ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __SCREAMING_SNAKE_CASE : str = True __SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.convert_ids_to_tokens(_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_A , _A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Optional[Any] = False __SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A ) # it is expected that only the first Chinese character is not preceded by "##". __SCREAMING_SNAKE_CASE : List[Any] = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(_A ) ] self.assertListEqual(_A , _A ) self.assertListEqual(_A , _A )
74
1
from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """weiweishi/roc-bert-base-zh""": """https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json""", } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''roc_bert''' def __init__( self : Tuple , _A : List[Any]=3_0522 , _A : Optional[int]=768 , _A : List[str]=12 , _A : str=12 , _A : Tuple=3072 , _A : int="gelu" , _A : Optional[Any]=0.1 , _A : Any=0.1 , _A : Optional[int]=512 , _A : Optional[int]=2 , _A : List[str]=0.02 , _A : Optional[Any]=1e-12 , _A : Tuple=True , _A : Union[str, Any]=0 , _A : Any="absolute" , _A : str=None , _A : int=True , _A : str=True , _A : List[Any]=768 , _A : Any=910 , _A : str=512 , _A : Union[str, Any]=2_4858 , _A : Tuple=True , **_A : List[str] , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = vocab_size __SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings __SCREAMING_SNAKE_CASE : Any = hidden_size __SCREAMING_SNAKE_CASE : Any = num_hidden_layers __SCREAMING_SNAKE_CASE : str = num_attention_heads __SCREAMING_SNAKE_CASE : Any = intermediate_size __SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act __SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob __SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : Optional[int] = initializer_range __SCREAMING_SNAKE_CASE : Union[str, Any] = type_vocab_size __SCREAMING_SNAKE_CASE : Any = layer_norm_eps __SCREAMING_SNAKE_CASE : Dict = use_cache __SCREAMING_SNAKE_CASE : Optional[Any] = enable_pronunciation __SCREAMING_SNAKE_CASE : Union[str, Any] = enable_shape __SCREAMING_SNAKE_CASE : List[str] = pronunciation_embed_dim __SCREAMING_SNAKE_CASE : str = pronunciation_vocab_size __SCREAMING_SNAKE_CASE : Optional[Any] = shape_embed_dim __SCREAMING_SNAKE_CASE : Any = shape_vocab_size __SCREAMING_SNAKE_CASE : int = concat_input __SCREAMING_SNAKE_CASE : Optional[int] = position_embedding_type __SCREAMING_SNAKE_CASE : int = classifier_dropout super().__init__(pad_token_id=_A , **_A )
74
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    """Deprecated alias of MobileViTImageProcessor, kept for backwards compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
74
1
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
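# Example runs of the helper above (cleaned-up name binary_and); the last check
# compares against Python's built-in & operator.
assert binary_and(25, 32) == "0b000000"
assert binary_and(25, 37) == "0b000001"
assert int(binary_and(58, 73), 2) == 58 & 73  # 8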
74
import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast lowercase_ = datasets.utils.logging.get_logger(__name__) @dataclass class __UpperCamelCase ( datasets.BuilderConfig ): """simple docstring""" lowerCAmelCase_ = 1_00_00 lowerCAmelCase_ = None lowerCAmelCase_ = None class __UpperCamelCase ( datasets.ArrowBasedBuilder ): """simple docstring""" lowerCAmelCase_ = ParquetConfig def UpperCAmelCase__ ( self : Any ): """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def UpperCAmelCase__ ( self : Any , _A : Optional[Any] ): """simple docstring""" if not self.config.data_files: raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) __SCREAMING_SNAKE_CASE : List[str] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_A , (str, list, tuple) ): __SCREAMING_SNAKE_CASE : Tuple = data_files if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Optional[int] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE : List[Any] = [dl_manager.iter_files(_A ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] __SCREAMING_SNAKE_CASE : int = [] for split_name, files in data_files.items(): if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Any = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE : Optional[int] = [dl_manager.iter_files(_A ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(_A ): with open(_A , '''rb''' ) as f: __SCREAMING_SNAKE_CASE : Dict = datasets.Features.from_arrow_schema(pq.read_schema(_A ) ) break splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'''files''': files} ) ) return splits def UpperCAmelCase__ ( self : str , _A : pa.Table ): """simple docstring""" if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example __SCREAMING_SNAKE_CASE : str = table_cast(_A , self.info.features.arrow_schema ) return pa_table def UpperCAmelCase__ ( self : Tuple , _A : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' ) for file_idx, file in enumerate(itertools.chain.from_iterable(_A ) ): with open(_A , '''rb''' ) as f: __SCREAMING_SNAKE_CASE : str = pq.ParquetFile(_A ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): __SCREAMING_SNAKE_CASE : Optional[Any] = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield F'''{file_idx}_{batch_idx}''', self._cast_table(_A ) except ValueError as 
e: logger.error(F'''Failed to read file \'{file}\' with error {type(_A )}: {e}''' ) raise
74
1
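The builder above streams each Parquet file in record batches instead of loading it whole. Below is a minimal standalone sketch of that same pattern, assuming only a local file path and an installed pyarrow; the helper name iter_parquet_tables is ours, not part of the snippet.

import pyarrow as pa
import pyarrow.parquet as pq

def iter_parquet_tables(path, batch_size=10_000):
    # Open the file once, then yield one pyarrow Table per record batch,
    # mirroring the batch-iteration loop in the builder above.
    with open(path, "rb") as f:
        parquet_file = pq.ParquetFile(f)
        for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=batch_size)):
            yield batch_idx, pa.Table.from_batches([record_batch])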
import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse("""3.8"""): import importlib_metadata else: import importlib.metadata as importlib_metadata def a__ ( snake_case , snake_case=False ): """simple docstring""" try: __SCREAMING_SNAKE_CASE : Union[str, Any] = os.environ[key] except KeyError: # KEY isn't set, default to `default`. __SCREAMING_SNAKE_CASE : Any = default else: # KEY is set, convert it to True or False. try: __SCREAMING_SNAKE_CASE : List[str] = strtobool(snake_case ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F'''If set, {key} must be yes or no.''' ) return _value lowercase_ = parse_flag_from_env("""RUN_SLOW""", default=False) lowercase_ = parse_flag_from_env("""RUN_REMOTE""", default=False) lowercase_ = parse_flag_from_env("""RUN_LOCAL""", default=True) lowercase_ = parse_flag_from_env("""RUN_PACKAGED""", default=True) # Compression lowercase_ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""") lowercase_ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""") lowercase_ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""") # Audio lowercase_ = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""), reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """, ) # Beam lowercase_ = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""), reason="""test requires apache-beam and a compatible dill version""", ) # Dill-cloudpickle compatibility lowercase_ = pytest.mark.skipif( config.DILL_VERSION <= version.parse("""0.3.2"""), reason="""test requires dill>0.3.2 for cloudpickle compatibility""", ) # Windows lowercase_ = pytest.mark.skipif( sys.platform == """win32""", reason="""test should not be run on Windows""", ) def a__ ( snake_case ): """simple docstring""" try: import faiss # noqa except ImportError: __SCREAMING_SNAKE_CASE : List[Any] = unittest.skip('''test requires faiss''' )(snake_case ) return test_case def a__ ( snake_case ): """simple docstring""" try: import regex # noqa except ImportError: __SCREAMING_SNAKE_CASE : List[str] = unittest.skip('''test requires regex''' )(snake_case ) return test_case def a__ ( snake_case ): """simple docstring""" try: import elasticsearch # noqa except ImportError: __SCREAMING_SNAKE_CASE : Dict = unittest.skip('''test requires elasticsearch''' )(snake_case ) return test_case def a__ ( snake_case ): """simple docstring""" try: import sqlalchemy # noqa except ImportError: __SCREAMING_SNAKE_CASE : Any = unittest.skip('''test requires sqlalchemy''' )(snake_case ) return test_case def a__ ( snake_case ): """simple docstring""" if not config.TORCH_AVAILABLE: __SCREAMING_SNAKE_CASE : Optional[Any] = unittest.skip('''test requires PyTorch''' )(snake_case ) return test_case def a__ ( snake_case ): """simple docstring""" if not config.TF_AVAILABLE: __SCREAMING_SNAKE_CASE : Dict = unittest.skip('''test requires 
TensorFlow''' )(snake_case ) return test_case def a__ ( snake_case ): """simple docstring""" if not config.JAX_AVAILABLE: __SCREAMING_SNAKE_CASE : Tuple = unittest.skip('''test requires JAX''' )(snake_case ) return test_case def a__ ( snake_case ): """simple docstring""" if not config.PIL_AVAILABLE: __SCREAMING_SNAKE_CASE : Union[str, Any] = unittest.skip('''test requires Pillow''' )(snake_case ) return test_case def a__ ( snake_case ): """simple docstring""" try: import transformers # noqa F401 except ImportError: return unittest.skip('''test requires transformers''' )(snake_case ) else: return test_case def a__ ( snake_case ): """simple docstring""" try: import tiktoken # noqa F401 except ImportError: return unittest.skip('''test requires tiktoken''' )(snake_case ) else: return test_case def a__ ( snake_case ): """simple docstring""" try: import spacy # noqa F401 except ImportError: return unittest.skip('''test requires spacy''' )(snake_case ) else: return test_case def a__ ( snake_case ): """simple docstring""" def _require_spacy_model(snake_case ): try: import spacy # noqa F401 spacy.load(snake_case ) except ImportError: return unittest.skip('''test requires spacy''' )(snake_case ) except OSError: return unittest.skip('''test requires spacy model \'{}\''''.format(snake_case ) )(snake_case ) else: return test_case return _require_spacy_model def a__ ( snake_case ): """simple docstring""" try: import pyspark # noqa F401 except ImportError: return unittest.skip('''test requires pyspark''' )(snake_case ) else: return test_case def a__ ( snake_case ): """simple docstring""" try: import joblibspark # noqa F401 except ImportError: return unittest.skip('''test requires joblibspark''' )(snake_case ) else: return test_case def a__ ( snake_case ): """simple docstring""" if not _run_slow_tests or _run_slow_tests == 0: __SCREAMING_SNAKE_CASE : Union[str, Any] = unittest.skip('''test is slow''' )(snake_case ) return test_case def a__ ( snake_case ): """simple docstring""" if not _run_local_tests or _run_local_tests == 0: __SCREAMING_SNAKE_CASE : int = unittest.skip('''test is local''' )(snake_case ) return test_case def a__ ( snake_case ): """simple docstring""" if not _run_packaged_tests or _run_packaged_tests == 0: __SCREAMING_SNAKE_CASE : Optional[Any] = unittest.skip('''test is packaged''' )(snake_case ) return test_case def a__ ( snake_case ): """simple docstring""" if not _run_remote_tests or _run_remote_tests == 0: __SCREAMING_SNAKE_CASE : Tuple = unittest.skip('''test requires remote''' )(snake_case ) return test_case def a__ ( *snake_case ): """simple docstring""" def decorate(cls ): for name, fn in cls.__dict__.items(): if callable(snake_case ) and name.startswith('''test''' ): for decorator in decorators: __SCREAMING_SNAKE_CASE : Optional[Any] = decorator(snake_case ) setattr(cls , snake_case , snake_case ) return cls return decorate class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" pass class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = 0 lowerCAmelCase_ = 1 lowerCAmelCase_ = 2 @contextmanager def a__ ( snake_case=OfflineSimulationMode.CONNECTION_FAILS , snake_case=1E-16 ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = requests.Session().request def timeout_request(snake_case , snake_case , snake_case , **snake_case ): # Change the url to an invalid url so that the connection hangs __SCREAMING_SNAKE_CASE : Dict = '''https://10.255.255.1''' if kwargs.get('''timeout''' ) is None: raise RequestWouldHangIndefinitelyError( 
F'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' ) __SCREAMING_SNAKE_CASE : Optional[int] = timeout try: return online_request(snake_case , snake_case , **snake_case ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier __SCREAMING_SNAKE_CASE : Dict = url __SCREAMING_SNAKE_CASE : Union[str, Any] = e.args[0] __SCREAMING_SNAKE_CASE : Optional[Any] = (max_retry_error.args[0].replace('''10.255.255.1''' , F'''OfflineMock[{url}]''' ),) __SCREAMING_SNAKE_CASE : Optional[int] = (max_retry_error,) raise def raise_connection_error(snake_case , snake_case , **snake_case ): raise requests.ConnectionError('''Offline mode is enabled.''' , request=snake_case ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch('''requests.Session.send''' , snake_case ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch('''requests.Session.request''' , snake_case ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch('''datasets.config.HF_DATASETS_OFFLINE''' , snake_case ): yield else: raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' ) @contextmanager def a__ ( *snake_case , **snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = str(Path().resolve() ) with tempfile.TemporaryDirectory(*snake_case , **snake_case ) as tmp_dir: try: os.chdir(snake_case ) yield finally: os.chdir(snake_case ) @contextmanager def a__ ( ): """simple docstring""" import gc gc.collect() __SCREAMING_SNAKE_CASE : Dict = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def a__ ( ): """simple docstring""" import gc gc.collect() __SCREAMING_SNAKE_CASE : List[str] = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." 
def a__ ( snake_case , snake_case ): """simple docstring""" return deepcopy(snake_case ).integers(0 , 100 , 10 ).tolist() == deepcopy(snake_case ).integers(0 , 100 , 10 ).tolist() def a__ ( snake_case ): """simple docstring""" import decorator from requests.exceptions import HTTPError def _wrapper(snake_case , *snake_case , **snake_case ): try: return func(*snake_case , **snake_case ) except HTTPError as err: if str(snake_case ).startswith('''500''' ) or str(snake_case ).startswith('''502''' ): pytest.xfail(str(snake_case ) ) raise err return decorator.decorator(_wrapper , snake_case ) class __UpperCamelCase : """simple docstring""" def __init__( self : Optional[Any] , _A : int , _A : str , _A : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = returncode __SCREAMING_SNAKE_CASE : List[Any] = stdout __SCREAMING_SNAKE_CASE : Union[str, Any] = stderr async def a__ ( snake_case , snake_case ): """simple docstring""" while True: __SCREAMING_SNAKE_CASE : Optional[int] = await stream.readline() if line: callback(snake_case ) else: break async def a__ ( snake_case , snake_case=None , snake_case=None , snake_case=None , snake_case=False , snake_case=False ): """simple docstring""" if echo: print('''\nRunning: ''' , ''' '''.join(snake_case ) ) __SCREAMING_SNAKE_CASE : List[Any] = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=snake_case , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=snake_case , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) __SCREAMING_SNAKE_CASE : Optional[int] = [] __SCREAMING_SNAKE_CASE : int = [] def tee(snake_case , snake_case , snake_case , snake_case="" ): __SCREAMING_SNAKE_CASE : Any = line.decode('''utf-8''' ).rstrip() sink.append(snake_case ) if not quiet: print(snake_case , snake_case , file=snake_case ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda snake_case : tee(snake_case , snake_case , sys.stdout , label='''stdout:''' ) ), _read_stream(p.stderr , lambda snake_case : tee(snake_case , snake_case , sys.stderr , label='''stderr:''' ) ), ] , timeout=snake_case , ) return _RunOutput(await p.wait() , snake_case , snake_case ) def a__ ( snake_case , snake_case=None , snake_case=None , snake_case=180 , snake_case=False , snake_case=True ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = asyncio.get_event_loop() __SCREAMING_SNAKE_CASE : Optional[Any] = loop.run_until_complete( _stream_subprocess(snake_case , env=snake_case , stdin=snake_case , timeout=snake_case , quiet=snake_case , echo=snake_case ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = ''' '''.join(snake_case ) if result.returncode > 0: __SCREAMING_SNAKE_CASE : Optional[Any] = '''\n'''.join(result.stderr ) raise RuntimeError( F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' F'''The combined stderr from workers follows:\n{stderr}''' ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(F'''\'{cmd_str}\' produced no output.''' ) return result def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = os.environ.get('''PYTEST_XDIST_WORKER''' , '''gw0''' ) __SCREAMING_SNAKE_CASE : List[Any] = re.sub(R'''^gw''' , '''''' , snake_case , 0 , re.M ) return int(snake_case ) def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = 29_500 __SCREAMING_SNAKE_CASE : Tuple = pytest_xdist_worker_id() return port + uniq_delta
74
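The test-utility module above gates slow/local/remote/packaged tests on environment variables parsed with strtobool. A cleaned-up sketch of that flag parser follows, written with conventional variable names for readability; parse_flag_from_env matches the name used by the callers in the snippet.

import os
from distutils.util import strtobool

def parse_flag_from_env(key, default=False):
    # Missing variable -> fall back to the default; otherwise the value must
    # be a yes/no style string understood by strtobool.
    value = os.environ.get(key)
    if value is None:
        return default
    try:
        return bool(strtobool(value))
    except ValueError:
        raise ValueError(f"If set, {key} must be yes or no.")

RUN_SLOW = parse_flag_from_env("RUN_SLOW", default=False)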
from math import isclose, sqrt def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = point_y / 4 / point_x __SCREAMING_SNAKE_CASE : int = 2 * normal_gradient / (1 + normal_gradient * normal_gradient) __SCREAMING_SNAKE_CASE : Tuple = (1 - normal_gradient * normal_gradient) / ( 1 + normal_gradient * normal_gradient ) __SCREAMING_SNAKE_CASE : int = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient) # to find the next point, solve the simultaneous equations: # y^2 + 4x^2 = 100 # y - b = m * (x - a) # ==> A x^2 + B x + C = 0 __SCREAMING_SNAKE_CASE : int = outgoing_gradient**2 + 4 __SCREAMING_SNAKE_CASE : List[str] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x) __SCREAMING_SNAKE_CASE : Optional[Any] = (point_y - outgoing_gradient * point_x) ** 2 - 100 __SCREAMING_SNAKE_CASE : str = ( -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) __SCREAMING_SNAKE_CASE : int = ( -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) # two solutions, one of which is our input point __SCREAMING_SNAKE_CASE : Dict = x_minus if isclose(snake_case , snake_case ) else x_plus __SCREAMING_SNAKE_CASE : Dict = point_y + outgoing_gradient * (next_x - point_x) return next_x, next_y, outgoing_gradient def a__ ( snake_case = 1.4 , snake_case = -9.6 ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : float = first_x_coord __SCREAMING_SNAKE_CASE : float = first_y_coord __SCREAMING_SNAKE_CASE : float = (10.1 - point_y) / (0.0 - point_x) while not (-0.01 <= point_x <= 0.01 and point_y > 0): __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = next_point(snake_case , snake_case , snake_case ) num_reflections += 1 return num_reflections if __name__ == "__main__": print(f'''{solution() = }''')
74
1
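A quick sanity check on the geometry the reflection solver above relies on: with the default arguments, the first impact point (1.4, -9.6) really does lie on the ellipse 4x^2 + y^2 = 100 that bounds the white cell, and the incoming beam from (0.0, 10.1) has the gradient computed in the loop.

x, y = 1.4, -9.6
print(4 * x**2 + y**2)          # 100.0  (7.84 + 92.16)
print((10.1 - y) / (0.0 - x))   # ~ -14.07, gradient of the incoming beam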
from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""", # See all GLPN models at https://huggingface.co/models?filter=glpn } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''glpn''' def __init__( self : Optional[int] , _A : List[Any]=3 , _A : Dict=4 , _A : Tuple=[2, 2, 2, 2] , _A : Dict=[8, 4, 2, 1] , _A : Dict=[32, 64, 160, 256] , _A : List[Any]=[7, 3, 3, 3] , _A : str=[4, 2, 2, 2] , _A : Optional[Any]=[1, 2, 5, 8] , _A : Any=[4, 4, 4, 4] , _A : Union[str, Any]="gelu" , _A : int=0.0 , _A : str=0.0 , _A : Optional[Any]=0.02 , _A : List[str]=0.1 , _A : Union[str, Any]=1e-6 , _A : Dict=64 , _A : Tuple=10 , _A : str=-1 , **_A : Optional[int] , ): """simple docstring""" super().__init__(**_A ) __SCREAMING_SNAKE_CASE : int = num_channels __SCREAMING_SNAKE_CASE : List[str] = num_encoder_blocks __SCREAMING_SNAKE_CASE : int = depths __SCREAMING_SNAKE_CASE : List[Any] = sr_ratios __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_sizes __SCREAMING_SNAKE_CASE : Dict = patch_sizes __SCREAMING_SNAKE_CASE : Tuple = strides __SCREAMING_SNAKE_CASE : Dict = mlp_ratios __SCREAMING_SNAKE_CASE : Tuple = num_attention_heads __SCREAMING_SNAKE_CASE : List[str] = hidden_act __SCREAMING_SNAKE_CASE : int = hidden_dropout_prob __SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : Tuple = initializer_range __SCREAMING_SNAKE_CASE : int = drop_path_rate __SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps __SCREAMING_SNAKE_CASE : Tuple = decoder_hidden_size __SCREAMING_SNAKE_CASE : str = max_depth __SCREAMING_SNAKE_CASE : Any = head_in_index
74
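A minimal usage sketch for the configuration class above, assuming a transformers install that ships GLPN (the public class name GLPNConfig is inferred from the '''glpn''' model type and is an assumption here):

from transformers import GLPNConfig

config = GLPNConfig()             # all defaults from the __init__ above
print(config.num_encoder_blocks)  # 4
print(config.hidden_sizes)        # [32, 64, 160, 256]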
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Any , _A : int , _A : Any=7 , _A : List[str]=3 , _A : Optional[Any]=18 , _A : List[str]=30 , _A : Optional[Any]=400 , _A : Any=True , _A : List[str]=None , _A : Union[str, Any]=True , _A : Optional[int]=None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = size if size is not None else {'''shortest_edge''': 20} __SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __SCREAMING_SNAKE_CASE : int = parent __SCREAMING_SNAKE_CASE : Optional[int] = batch_size __SCREAMING_SNAKE_CASE : Optional[Any] = num_channels __SCREAMING_SNAKE_CASE : List[str] = image_size __SCREAMING_SNAKE_CASE : int = min_resolution __SCREAMING_SNAKE_CASE : Optional[int] = max_resolution __SCREAMING_SNAKE_CASE : List[Any] = do_resize __SCREAMING_SNAKE_CASE : Union[str, Any] = size __SCREAMING_SNAKE_CASE : str = do_center_crop __SCREAMING_SNAKE_CASE : Any = crop_size def UpperCAmelCase__ ( self : Dict ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = MobileNetVaImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = MobileNetVaImageProcessingTester(self ) @property def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) self.assertTrue(hasattr(_A , '''do_center_crop''' ) ) self.assertTrue(hasattr(_A , '''crop_size''' ) ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def UpperCAmelCase__ ( self : int ): """simple docstring""" pass def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] 
, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : Dict = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
74
1
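Each of the tests above exercises the same round trip: resize to a 20-pixel shortest edge, center-crop to 18x18, and check the resulting tensor shape. A standalone sketch of that round trip, assuming Pillow, torch, and a transformers build exposing the processor under the de-obfuscated name MobileNetV1ImageProcessor:

import numpy as np
from PIL import Image
from transformers import MobileNetV1ImageProcessor

# Random RGB image, then the same size / crop_size values used by the tester class above.
image = Image.fromarray(np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8))
processor = MobileNetV1ImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])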
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowercase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") lowercase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) lowercase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''A folder containing the training data.'''} ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''A folder containing the validation data.'''} ) lowerCAmelCase_ = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) lowerCAmelCase_ = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) lowerCAmelCase_ = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = {} if self.train_dir is not None: __SCREAMING_SNAKE_CASE : Dict = self.train_dir if self.validation_dir is not None: __SCREAMING_SNAKE_CASE : Any = self.validation_dir __SCREAMING_SNAKE_CASE : List[Any] = data_files if data_files else None @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(lowerCAmelCase__ )} , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) lowerCAmelCase_ = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class __UpperCamelCase : """simple docstring""" def __init__( self : Tuple , _A : Optional[int]=192 , _A : List[Any]=32 , _A : Optional[int]=4 , _A : str=0.6 ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = input_size __SCREAMING_SNAKE_CASE : List[str] = mask_patch_size __SCREAMING_SNAKE_CASE : Dict = model_patch_size __SCREAMING_SNAKE_CASE : int = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError('''Input size must be divisible by mask patch size''' ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError('''Mask patch size must be divisible by model patch size''' ) __SCREAMING_SNAKE_CASE : Any = self.input_size // self.mask_patch_size __SCREAMING_SNAKE_CASE : Optional[Any] = self.mask_patch_size // self.model_patch_size __SCREAMING_SNAKE_CASE : int = self.rand_size**2 __SCREAMING_SNAKE_CASE : Optional[int] = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = np.random.permutation(self.token_count )[: self.mask_count] __SCREAMING_SNAKE_CASE : Union[str, Any] = np.zeros(self.token_count , dtype=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = 1 __SCREAMING_SNAKE_CASE : List[str] = mask.reshape((self.rand_size, self.rand_size) ) __SCREAMING_SNAKE_CASE : List[Any] = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack([example['''pixel_values'''] for example in examples] ) __SCREAMING_SNAKE_CASE : Any = torch.stack([example['''mask'''] for example in examples] ) return {"pixel_values": pixel_values, 
"bool_masked_pos": mask} def a__ ( ): """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __SCREAMING_SNAKE_CASE : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_mim''' , snake_case , snake_case ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = training_args.get_process_log_level() logger.setLevel(snake_case ) transformers.utils.logging.set_verbosity(snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. __SCREAMING_SNAKE_CASE : Tuple = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __SCREAMING_SNAKE_CASE : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset. __SCREAMING_SNAKE_CASE : Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. 
__SCREAMING_SNAKE_CASE : Any = None if '''validation''' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , snake_case ) and data_args.train_val_split > 0.0: __SCREAMING_SNAKE_CASE : List[str] = ds['''train'''].train_test_split(data_args.train_val_split ) __SCREAMING_SNAKE_CASE : int = split['''train'''] __SCREAMING_SNAKE_CASE : Dict = split['''test'''] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __SCREAMING_SNAKE_CASE : List[Any] = { '''cache_dir''': model_args.cache_dir, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.config_name_or_path: __SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(model_args.config_name_or_path , **snake_case ) elif model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **snake_case ) else: __SCREAMING_SNAKE_CASE : List[Any] = CONFIG_MAPPING[model_args.model_type]() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(snake_case , '''decoder_type''' ): __SCREAMING_SNAKE_CASE : Any = '''simmim''' # adapt config __SCREAMING_SNAKE_CASE : str = model_args.image_size if model_args.image_size is not None else config.image_size __SCREAMING_SNAKE_CASE : int = model_args.patch_size if model_args.patch_size is not None else config.patch_size __SCREAMING_SNAKE_CASE : str = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { '''image_size''': model_args.image_size, '''patch_size''': model_args.patch_size, '''encoder_stride''': model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: __SCREAMING_SNAKE_CASE : int = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case ) elif model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case ) else: __SCREAMING_SNAKE_CASE : List[Any] = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } __SCREAMING_SNAKE_CASE : str = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : int = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('''Training new model from scratch''' ) __SCREAMING_SNAKE_CASE : List[Any] = AutoModelForMaskedImageModeling.from_config(snake_case ) if training_args.do_train: __SCREAMING_SNAKE_CASE : Any = ds['''train'''].column_names else: __SCREAMING_SNAKE_CASE : int = ds['''validation'''].column_names if data_args.image_column_name is not None: __SCREAMING_SNAKE_CASE : List[Any] = data_args.image_column_name elif "image" in column_names: __SCREAMING_SNAKE_CASE : str = '''image''' elif "img" in column_names: 
__SCREAMING_SNAKE_CASE : List[str] = '''img''' else: __SCREAMING_SNAKE_CASE : Tuple = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py __SCREAMING_SNAKE_CASE : Any = Compose( [ Lambda(lambda snake_case : img.convert('''RGB''' ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator __SCREAMING_SNAKE_CASE : str = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(snake_case ): __SCREAMING_SNAKE_CASE : str = [transforms(snake_case ) for image in examples[image_column_name]] __SCREAMING_SNAKE_CASE : str = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError('''--do_train requires a train dataset''' ) if data_args.max_train_samples is not None: __SCREAMING_SNAKE_CASE : Dict = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError('''--do_eval requires a validation dataset''' ) if data_args.max_eval_samples is not None: __SCREAMING_SNAKE_CASE : Union[str, Any] = ( ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(snake_case ) # Initialize our trainer __SCREAMING_SNAKE_CASE : List[str] = Trainer( model=snake_case , args=snake_case , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=snake_case , data_collator=snake_case , ) # Training if training_args.do_train: __SCREAMING_SNAKE_CASE : Union[str, Any] = None if training_args.resume_from_checkpoint is not None: __SCREAMING_SNAKE_CASE : Tuple = training_args.resume_from_checkpoint elif last_checkpoint is not None: __SCREAMING_SNAKE_CASE : int = last_checkpoint __SCREAMING_SNAKE_CASE : Tuple = trainer.train(resume_from_checkpoint=snake_case ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: __SCREAMING_SNAKE_CASE : Union[str, Any] = trainer.evaluate() trainer.log_metrics('''eval''' , snake_case ) trainer.save_metrics('''eval''' , snake_case ) # Write model card and (optionally) push to hub __SCREAMING_SNAKE_CASE : Optional[Any] = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''masked-image-modeling''', '''dataset''': data_args.dataset_name, '''tags''': ['''masked-image-modeling'''], } if training_args.push_to_hub: trainer.push_to_hub(**snake_case ) else: trainer.create_model_card(**snake_case ) if __name__ == "__main__": main()
74
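The MaskGenerator in the script above produces one boolean mask per image at model-patch resolution. The same computation, unrolled with the script's default sizes (192-pixel input, 32-pixel mask patches, 4-pixel model patches, 0.6 mask ratio), as a standalone check:

import numpy as np
import torch

input_size, mask_patch_size, model_patch_size, mask_ratio = 192, 32, 4, 0.6
rand_size = input_size // mask_patch_size             # 6 mask patches per side
scale = mask_patch_size // model_patch_size           # each mask patch covers 8x8 model patches
token_count = rand_size**2                            # 36
mask_count = int(np.ceil(token_count * mask_ratio))   # 22 patches get masked

idx = np.random.permutation(token_count)[:mask_count]
mask = np.zeros(token_count, dtype=int)
mask[idx] = 1
mask = mask.reshape(rand_size, rand_size).repeat(scale, axis=0).repeat(scale, axis=1)
print(torch.tensor(mask.flatten()).shape)             # torch.Size([2304]) == 48 * 48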
def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = [0 for i in range(len(snake_case ) )] # initialize interval's left pointer and right pointer __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = 0, 0 for i in range(1 , len(snake_case ) ): # case when current index is inside the interval if i <= right_pointer: __SCREAMING_SNAKE_CASE : List[Any] = min(right_pointer - i + 1 , z_result[i - left_pointer] ) __SCREAMING_SNAKE_CASE : Dict = min_edge while go_next(snake_case , snake_case , snake_case ): z_result[i] += 1 # if new index's result gives us more right interval, # we have to update left_pointer and right_pointer if i + z_result[i] - 1 > right_pointer: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = i, i + z_result[i] - 1 return z_result def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" return i + z_result[i] < len(snake_case ) and s[z_result[i]] == s[i + z_result[i]] def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = 0 # concatenate 'pattern' and 'input_str' and call z_function # with concatenated string __SCREAMING_SNAKE_CASE : str = z_function(pattern + input_str ) for val in z_result: # if value is greater than length of the pattern string # that means this index is starting position of substring # which is equal to pattern string if val >= len(snake_case ): answer += 1 return answer if __name__ == "__main__": import doctest doctest.testmod()
74
1
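A de-obfuscated, self-contained version of the Z-function matcher above, plus a small worked example; the helper names z_function and count_occurrences are ours, not from the snippet.

def z_function(s):
    # z[i] = length of the longest common prefix of s and s[i:]
    z = [0] * len(s)
    left = right = 0
    for i in range(1, len(s)):
        if i <= right:
            z[i] = min(right - i + 1, z[i - left])
        while i + z[i] < len(s) and s[z[i]] == s[i + z[i]]:
            z[i] += 1
        if i + z[i] - 1 > right:
            left, right = i, i + z[i] - 1
    return z

def count_occurrences(pattern, text):
    # Any position whose Z-value reaches len(pattern) starts a full match
    # of the pattern inside the concatenated string.
    return sum(1 for v in z_function(pattern + text) if v >= len(pattern))

print(count_occurrences("aba", "abacaba"))  # 2 (matches at text positions 0 and 4)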
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.local_sgd import LocalSGD ######################################################################## # This is a fully working simple example to use Accelerate # with LocalSGD, which is a method to synchronize model # parameters every K batches. It is different, but complementary # to gradient accumulation. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowercase_ = 16 lowercase_ = 32 def a__ ( snake_case , snake_case = 16 ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __SCREAMING_SNAKE_CASE : Optional[int] = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(snake_case ): # max_length=None => use the model max length (it's actually the default) __SCREAMING_SNAKE_CASE : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case , max_length=snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __SCREAMING_SNAKE_CASE : int = datasets.map( snake_case , batched=snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __SCREAMING_SNAKE_CASE : List[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(snake_case ): # On TPU it's best to pad everything to the same length or training will be very slow. __SCREAMING_SNAKE_CASE : Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __SCREAMING_SNAKE_CASE : int = 16 elif accelerator.mixed_precision != "no": __SCREAMING_SNAKE_CASE : List[Any] = 8 else: __SCREAMING_SNAKE_CASE : Dict = None return tokenizer.pad( snake_case , padding='''longest''' , max_length=snake_case , pad_to_multiple_of=snake_case , return_tensors='''pt''' , ) # Instantiate dataloaders. 
__SCREAMING_SNAKE_CASE : List[str] = DataLoader( tokenized_datasets['''train'''] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case ) __SCREAMING_SNAKE_CASE : Dict = DataLoader( tokenized_datasets['''validation'''] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowercase_ = mocked_dataloaders # noqa: F811 def a__ ( snake_case , snake_case ): """simple docstring""" # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , snake_case ) == "1": __SCREAMING_SNAKE_CASE : Union[str, Any] = 2 # New Code # __SCREAMING_SNAKE_CASE : str = int(args.gradient_accumulation_steps ) __SCREAMING_SNAKE_CASE : int = int(args.local_sgd_steps ) # Initialize accelerator __SCREAMING_SNAKE_CASE : List[Any] = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=snake_case ) if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]: raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''' ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __SCREAMING_SNAKE_CASE : List[str] = config['''lr'''] __SCREAMING_SNAKE_CASE : str = int(config['''num_epochs'''] ) __SCREAMING_SNAKE_CASE : Any = int(config['''seed'''] ) __SCREAMING_SNAKE_CASE : List[Any] = int(config['''batch_size'''] ) __SCREAMING_SNAKE_CASE : int = evaluate.load('''glue''' , '''mrpc''' ) set_seed(snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = get_dataloaders(snake_case , snake_case ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __SCREAMING_SNAKE_CASE : int = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=snake_case ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __SCREAMING_SNAKE_CASE : Dict = model.to(accelerator.device ) # Instantiate optimizer __SCREAMING_SNAKE_CASE : List[Any] = AdamW(params=model.parameters() , lr=snake_case ) # Instantiate scheduler __SCREAMING_SNAKE_CASE : List[str] = get_linear_schedule_with_warmup( optimizer=snake_case , num_warmup_steps=100 , num_training_steps=(len(snake_case ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = accelerator.prepare( snake_case , snake_case , snake_case , snake_case , snake_case ) # Now we train the model for epoch in range(snake_case ): model.train() with LocalSGD( accelerator=snake_case , model=snake_case , local_sgd_steps=snake_case , enabled=local_sgd_steps is not None ) as local_sgd: for step, batch in enumerate(snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(snake_case ): __SCREAMING_SNAKE_CASE : Optional[Any] = model(**snake_case ) __SCREAMING_SNAKE_CASE : Optional[Any] = output.loss accelerator.backward(snake_case ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # LocalSGD-specific line local_sgd.step() model.eval() for step, batch in enumerate(snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Optional[int] = model(**snake_case ) __SCREAMING_SNAKE_CASE : List[Any] = outputs.logits.argmax(dim=-1 ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=snake_case , references=snake_case , ) __SCREAMING_SNAKE_CASE : Dict = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , snake_case ) def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=snake_case , default=snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) # New Code # parser.add_argument( '''--gradient_accumulation_steps''' , type=snake_case , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , ) parser.add_argument( '''--local_sgd_steps''' , type=snake_case , default=8 , help='''Number of local SGD steps or None to disable local SGD''' ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) __SCREAMING_SNAKE_CASE : str = parser.parse_args() __SCREAMING_SNAKE_CASE : int = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(snake_case , snake_case ) if __name__ == "__main__": main()
74
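The training loop above combines gradient accumulation with LocalSGD's periodic parameter synchronization. Below is a minimal runnable skeleton of that combination on a toy model with synthetic data; on a single process the synchronization has nothing to average, so this mainly illustrates the call pattern (accumulate, backward, step, then local_sgd.step).

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.local_sgd import LocalSGD

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
loader = DataLoader(TensorDataset(torch.randn(64, 4), torch.randint(0, 2, (64,))), batch_size=8)
loss_fn = torch.nn.CrossEntropyLoss()

model, optimizer, loader = accelerator.prepare(model, optimizer, loader)

with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=4, enabled=True) as local_sgd:
    for x, y in loader:
        with accelerator.accumulate(model):
            loss = loss_fn(model(x), y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        # Synchronizes parameters across workers every `local_sgd_steps`
        # optimizer steps; effectively a no-op when running on one process.
        local_sgd.step()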
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowercase_ = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""", """SwinForImageClassification""", """SwinForMaskedImageModeling""", """SwinModel""", """SwinPreTrainedModel""", """SwinBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFSwinForImageClassification""", """TFSwinForMaskedImageModeling""", """TFSwinModel""", """TFSwinPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swin import ( SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel, SwinPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
1
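The _LazyModule registration above defers the heavy torch/TF submodule imports until an attribute is actually requested; from the user's side the import looks unchanged. A small usage sketch, assuming torch and a transformers build that includes Swin:

from transformers import SwinConfig, SwinModel

config = SwinConfig()   # default configuration
model = SwinModel(config)
print(sum(p.numel() for p in model.parameters()))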
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = XCLIPTextConfig() # derive patch size from model name __SCREAMING_SNAKE_CASE : Tuple = model_name.find('''patch''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPVisionConfig(patch_size=snake_case , num_frames=snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 __SCREAMING_SNAKE_CASE : Optional[Any] = 12 __SCREAMING_SNAKE_CASE : Optional[Any] = 1_024 __SCREAMING_SNAKE_CASE : int = 4_096 __SCREAMING_SNAKE_CASE : Tuple = 16 __SCREAMING_SNAKE_CASE : Optional[int] = 24 __SCREAMING_SNAKE_CASE : Optional[int] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 if model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Any = 336 __SCREAMING_SNAKE_CASE : Any = XCLIPConfig.from_text_vision_configs(snake_case , snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Any = 768 return config def a__ ( snake_case ): """simple docstring""" # text encoder if name == "token_embedding.weight": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' ) if name == "positional_embedding": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' ) if "ln_1" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''ln_1''' , '''layer_norm1''' ) if "ln_2" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''ln_2''' , '''layer_norm2''' ) if "c_fc" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''c_fc''' , '''fc1''' ) if "c_proj" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''c_proj''' , '''fc2''' ) if name.startswith('''transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : Any = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' ) if "attn.out_proj" in name and "message" not in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' ) if "ln_final" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''ln_final''' , '''text_model.final_layer_norm''' ) # visual encoder if name == "visual.class_embedding": __SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' ) if name == "visual.positional_embedding": __SCREAMING_SNAKE_CASE : Tuple = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' ) if name.startswith('''visual.transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : List[Any] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' ) if "visual.conv1" in name: __SCREAMING_SNAKE_CASE : Any = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' ) if "visual.ln_pre" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' ) if "visual.ln_post" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''visual.ln_post''' , 
'''vision_model.post_layernorm''' ) if "visual.proj" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''visual.proj''' , '''visual_projection.weight''' ) if "text_projection" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''text_projection''' , '''text_projection.weight''' ) # things on top if "prompts_visual_proj" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' ) if "prompts_visual_ln" in name: __SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' ) # mit if name == "mit.positional_embedding": __SCREAMING_SNAKE_CASE : Any = name.replace('''positional''' , '''position''' ) if name.startswith('''mit.resblocks''' ): __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' ) # prompts generator if name.startswith('''prompts_generator.norm''' ): __SCREAMING_SNAKE_CASE : Tuple = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' ) return name def a__ ( snake_case , snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): __SCREAMING_SNAKE_CASE : Tuple = orig_state_dict.pop(snake_case ) if "attn.in_proj" in key: __SCREAMING_SNAKE_CASE : Optional[Any] = key.split('''.''' ) if key.startswith('''visual''' ): __SCREAMING_SNAKE_CASE : List[Any] = key_split[3] __SCREAMING_SNAKE_CASE : Any = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __SCREAMING_SNAKE_CASE : Union[str, Any] = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Optional[Any] = val[ :dim ] __SCREAMING_SNAKE_CASE : Tuple = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim: ] else: if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : str = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Dict = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[-dim:] elif key.startswith('''mit''' ): __SCREAMING_SNAKE_CASE : List[str] = key_split[2] __SCREAMING_SNAKE_CASE : Union[str, Any] = config.vision_config.mit_hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : str = val[:dim, :] __SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :] __SCREAMING_SNAKE_CASE : Optional[int] = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Any = val[:dim] __SCREAMING_SNAKE_CASE : Any = val[dim : dim * 2] __SCREAMING_SNAKE_CASE : Optional[Any] = val[-dim:] else: __SCREAMING_SNAKE_CASE : Optional[Any] = key_split[2] __SCREAMING_SNAKE_CASE : Any = config.text_config.hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[:dim, :] __SCREAMING_SNAKE_CASE : int = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Dict = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Tuple = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : int = val[-dim:] else: __SCREAMING_SNAKE_CASE : int = rename_key(snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __SCREAMING_SNAKE_CASE : int = val.T __SCREAMING_SNAKE_CASE : Union[str, Any] = val return orig_state_dict def a__ ( snake_case ): """simple docstring""" if num_frames == 8: __SCREAMING_SNAKE_CASE : List[Any] = '''eating_spaghetti_8_frames.npy''' elif num_frames == 16: __SCREAMING_SNAKE_CASE : Tuple = '''eating_spaghetti.npy''' 
elif num_frames == 32: __SCREAMING_SNAKE_CASE : Dict = '''eating_spaghetti_32_frames.npy''' __SCREAMING_SNAKE_CASE : List[str] = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename=snake_case , repo_type='''dataset''' , ) __SCREAMING_SNAKE_CASE : int = np.load(snake_case ) return list(snake_case ) def a__ ( snake_case , snake_case=None , snake_case=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = { # fully supervised kinetics-400 checkpoints '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''', '''xclip-base-patch32-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth''' ), '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''', '''xclip-base-patch16-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth''' ), '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb''', '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f''', # fully supervised kinetics-600 checkpoints '''xclip-base-patch16-kinetics-600''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth''' ), '''xclip-base-patch16-kinetics-600-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth''' ), '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be''', # few shot '''xclip-base-patch16-hmdb-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth''' ), '''xclip-base-patch16-hmdb-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth''' ), '''xclip-base-patch16-hmdb-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth''' ), '''xclip-base-patch16-hmdb-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth''' ), '''xclip-base-patch16-ucf-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth''' ), '''xclip-base-patch16-ucf-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth''' ), '''xclip-base-patch16-ucf-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth''' ), '''xclip-base-patch16-ucf-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth''' ), # zero shot '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''', } __SCREAMING_SNAKE_CASE : Optional[Any] = model_to_url[model_name] __SCREAMING_SNAKE_CASE : Any = 8 if "16-frames" in model_name: __SCREAMING_SNAKE_CASE : Optional[int] = 16 elif "shot" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 32 __SCREAMING_SNAKE_CASE : List[str] = get_xclip_config(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPModel(snake_case ) model.eval() if "drive" in checkpoint_url: __SCREAMING_SNAKE_CASE : Union[str, Any] = '''pytorch_model.bin''' gdown.cached_download(snake_case , 
snake_case , quiet=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(snake_case , map_location='''cpu''' )['''model'''] else: __SCREAMING_SNAKE_CASE : str = torch.hub.load_state_dict_from_url(snake_case )['''model'''] __SCREAMING_SNAKE_CASE : List[Any] = convert_state_dict(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = XCLIPModel(snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = model.load_state_dict(snake_case , strict=snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() __SCREAMING_SNAKE_CASE : Any = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224 __SCREAMING_SNAKE_CASE : str = VideoMAEImageProcessor(size=snake_case ) __SCREAMING_SNAKE_CASE : int = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : Optional[int] = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : List[Any] = XCLIPProcessor(image_processor=snake_case , tokenizer=snake_case ) __SCREAMING_SNAKE_CASE : Dict = prepare_video(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = processor( text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=snake_case , return_tensors='''pt''' , padding=snake_case ) print('''Shape of pixel values:''' , inputs.pixel_values.shape ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Optional[Any] = model(**snake_case ) # Verify outputs __SCREAMING_SNAKE_CASE : Dict = outputs.logits_per_video __SCREAMING_SNAKE_CASE : Tuple = logits_per_video.softmax(dim=1 ) print('''Probs:''' , snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] ) elif model_name == "xclip-base-patch16": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] ) elif model_name == "xclip-large-patch14": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": __SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": __SCREAMING_SNAKE_CASE : str = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": __SCREAMING_SNAKE_CASE : int = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": 
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] ) else: raise ValueError(F'''Model name {model_name} not supported''' ) assert torch.allclose(snake_case , snake_case , atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case ) if push_to_hub: print('''Pushing model, processor and slow tokenizer files to the hub...''' ) model.push_to_hub(snake_case , organization='''nielsr''' ) processor.push_to_hub(snake_case , organization='''nielsr''' ) slow_tokenizer.push_to_hub(snake_case , organization='''nielsr''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowercase_ = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = XCLIPTextConfig() # derive patch size from model name __SCREAMING_SNAKE_CASE : Tuple = model_name.find('''patch''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPVisionConfig(patch_size=snake_case , num_frames=snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 __SCREAMING_SNAKE_CASE : Optional[Any] = 12 __SCREAMING_SNAKE_CASE : Optional[Any] = 1_024 __SCREAMING_SNAKE_CASE : int = 4_096 __SCREAMING_SNAKE_CASE : Tuple = 16 __SCREAMING_SNAKE_CASE : Optional[int] = 24 __SCREAMING_SNAKE_CASE : Optional[int] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 if model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Any = 336 __SCREAMING_SNAKE_CASE : Any = XCLIPConfig.from_text_vision_configs(snake_case , snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Any = 768 return config def a__ ( snake_case ): """simple docstring""" # text encoder if name == "token_embedding.weight": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' ) if name == "positional_embedding": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' ) if "ln_1" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''ln_1''' , '''layer_norm1''' ) if "ln_2" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''ln_2''' , '''layer_norm2''' ) if "c_fc" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''c_fc''' , '''fc1''' ) if "c_proj" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''c_proj''' , '''fc2''' ) if name.startswith('''transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : Any = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' ) if "attn.out_proj" in name and "message" not in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' ) if "ln_final" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''ln_final''' , '''text_model.final_layer_norm''' ) # visual encoder if name == "visual.class_embedding": __SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' ) if name == "visual.positional_embedding": __SCREAMING_SNAKE_CASE : Tuple = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' ) if name.startswith('''visual.transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : List[Any] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' ) if "visual.conv1" in name: __SCREAMING_SNAKE_CASE : Any = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' ) if "visual.ln_pre" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' ) if "visual.ln_post" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''visual.ln_post''' , 
'''vision_model.post_layernorm''' ) if "visual.proj" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''visual.proj''' , '''visual_projection.weight''' ) if "text_projection" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''text_projection''' , '''text_projection.weight''' ) # things on top if "prompts_visual_proj" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' ) if "prompts_visual_ln" in name: __SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' ) # mit if name == "mit.positional_embedding": __SCREAMING_SNAKE_CASE : Any = name.replace('''positional''' , '''position''' ) if name.startswith('''mit.resblocks''' ): __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' ) # prompts generator if name.startswith('''prompts_generator.norm''' ): __SCREAMING_SNAKE_CASE : Tuple = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' ) return name def a__ ( snake_case , snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): __SCREAMING_SNAKE_CASE : Tuple = orig_state_dict.pop(snake_case ) if "attn.in_proj" in key: __SCREAMING_SNAKE_CASE : Optional[Any] = key.split('''.''' ) if key.startswith('''visual''' ): __SCREAMING_SNAKE_CASE : List[Any] = key_split[3] __SCREAMING_SNAKE_CASE : Any = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __SCREAMING_SNAKE_CASE : Union[str, Any] = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Optional[Any] = val[ :dim ] __SCREAMING_SNAKE_CASE : Tuple = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim: ] else: if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : str = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Dict = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[-dim:] elif key.startswith('''mit''' ): __SCREAMING_SNAKE_CASE : List[str] = key_split[2] __SCREAMING_SNAKE_CASE : Union[str, Any] = config.vision_config.mit_hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : str = val[:dim, :] __SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :] __SCREAMING_SNAKE_CASE : Optional[int] = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Any = val[:dim] __SCREAMING_SNAKE_CASE : Any = val[dim : dim * 2] __SCREAMING_SNAKE_CASE : Optional[Any] = val[-dim:] else: __SCREAMING_SNAKE_CASE : Optional[Any] = key_split[2] __SCREAMING_SNAKE_CASE : Any = config.text_config.hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[:dim, :] __SCREAMING_SNAKE_CASE : int = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Dict = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Tuple = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : int = val[-dim:] else: __SCREAMING_SNAKE_CASE : int = rename_key(snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __SCREAMING_SNAKE_CASE : int = val.T __SCREAMING_SNAKE_CASE : Union[str, Any] = val return orig_state_dict def a__ ( snake_case ): """simple docstring""" if num_frames == 8: __SCREAMING_SNAKE_CASE : List[Any] = '''eating_spaghetti_8_frames.npy''' elif num_frames == 16: __SCREAMING_SNAKE_CASE : Tuple = '''eating_spaghetti.npy''' 
elif num_frames == 32: __SCREAMING_SNAKE_CASE : Dict = '''eating_spaghetti_32_frames.npy''' __SCREAMING_SNAKE_CASE : List[str] = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename=snake_case , repo_type='''dataset''' , ) __SCREAMING_SNAKE_CASE : int = np.load(snake_case ) return list(snake_case ) def a__ ( snake_case , snake_case=None , snake_case=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = { # fully supervised kinetics-400 checkpoints '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''', '''xclip-base-patch32-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth''' ), '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''', '''xclip-base-patch16-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth''' ), '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb''', '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f''', # fully supervised kinetics-600 checkpoints '''xclip-base-patch16-kinetics-600''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth''' ), '''xclip-base-patch16-kinetics-600-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth''' ), '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be''', # few shot '''xclip-base-patch16-hmdb-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth''' ), '''xclip-base-patch16-hmdb-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth''' ), '''xclip-base-patch16-hmdb-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth''' ), '''xclip-base-patch16-hmdb-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth''' ), '''xclip-base-patch16-ucf-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth''' ), '''xclip-base-patch16-ucf-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth''' ), '''xclip-base-patch16-ucf-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth''' ), '''xclip-base-patch16-ucf-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth''' ), # zero shot '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''', } __SCREAMING_SNAKE_CASE : Optional[Any] = model_to_url[model_name] __SCREAMING_SNAKE_CASE : Any = 8 if "16-frames" in model_name: __SCREAMING_SNAKE_CASE : Optional[int] = 16 elif "shot" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 32 __SCREAMING_SNAKE_CASE : List[str] = get_xclip_config(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPModel(snake_case ) model.eval() if "drive" in checkpoint_url: __SCREAMING_SNAKE_CASE : Union[str, Any] = '''pytorch_model.bin''' gdown.cached_download(snake_case , 
snake_case , quiet=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(snake_case , map_location='''cpu''' )['''model'''] else: __SCREAMING_SNAKE_CASE : str = torch.hub.load_state_dict_from_url(snake_case )['''model'''] __SCREAMING_SNAKE_CASE : List[Any] = convert_state_dict(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = XCLIPModel(snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = model.load_state_dict(snake_case , strict=snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() __SCREAMING_SNAKE_CASE : Any = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224 __SCREAMING_SNAKE_CASE : str = VideoMAEImageProcessor(size=snake_case ) __SCREAMING_SNAKE_CASE : int = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : Optional[int] = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : List[Any] = XCLIPProcessor(image_processor=snake_case , tokenizer=snake_case ) __SCREAMING_SNAKE_CASE : Dict = prepare_video(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = processor( text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=snake_case , return_tensors='''pt''' , padding=snake_case ) print('''Shape of pixel values:''' , inputs.pixel_values.shape ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Optional[Any] = model(**snake_case ) # Verify outputs __SCREAMING_SNAKE_CASE : Dict = outputs.logits_per_video __SCREAMING_SNAKE_CASE : Tuple = logits_per_video.softmax(dim=1 ) print('''Probs:''' , snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] ) elif model_name == "xclip-base-patch16": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] ) elif model_name == "xclip-large-patch14": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": __SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": __SCREAMING_SNAKE_CASE : str = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": __SCREAMING_SNAKE_CASE : int = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": 
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] ) else: raise ValueError(F'''Model name {model_name} not supported''' ) assert torch.allclose(snake_case , snake_case , atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case ) if push_to_hub: print('''Pushing model, processor and slow tokenizer files to the hub...''' ) model.push_to_hub(snake_case , organization='''nielsr''' ) processor.push_to_hub(snake_case , organization='''nielsr''' ) slow_tokenizer.push_to_hub(snake_case , organization='''nielsr''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowercase_ = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import datasets

from .evaluate import evaluate


lowercase_ = """\
@inproceedings{Rajpurkar2016SQuAD10,
    title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
    author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
    booktitle={EMNLP},
    year={2016}
}
"""

lowercase_ = """
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).

Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the
corresponding reading passage, or the question might be unanswerable.
"""

lowercase_ = """
Computes SQuAD scores (F1 and EM).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': the text of the answer
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the SQuAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
            Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly match the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
Examples:

    >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
    >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
    >>> squad_metric = datasets.load_metric(\"squad\")
    >>> results = squad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
    """simple docstring"""

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    '''predictions''': {'''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.Value('''string''' )},
                    '''references''': {
                        '''id''': datasets.Value('''string''' ),
                        '''answers''': datasets.features.Sequence(
                            {
                                '''text''': datasets.Value('''string''' ),
                                '''answer_start''': datasets.Value('''int32''' ),
                            } ),
                    },
                } ) ,
            codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] ,
            reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] ,
        )

    def UpperCAmelCase__ ( self : List[str] , _A : Tuple , _A : Optional[int] ):
        """simple docstring"""
        __SCREAMING_SNAKE_CASE : Tuple = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
        __SCREAMING_SNAKE_CASE : int = [
            {
                '''paragraphs''': [
                    {
                        '''qas''': [
                            {
                                '''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
                                '''id''': ref['''id'''],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        __SCREAMING_SNAKE_CASE : Optional[int] = evaluate(dataset=_A , predictions=_A )
        return score
from pathlib import Path

import fire


def a__ ( snake_case , snake_case , snake_case ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE : str = Path(snake_case )
    __SCREAMING_SNAKE_CASE : Dict = Path(snake_case )
    dest_dir.mkdir(exist_ok=snake_case )
    for path in src_dir.iterdir():
        __SCREAMING_SNAKE_CASE : Union[str, Any] = [x.rstrip() for x in list(path.open().readlines() )][:n]
        __SCREAMING_SNAKE_CASE : Tuple = dest_dir.joinpath(path.name )
        print(snake_case )
        dest_path.open('''w''' ).write('''\n'''.join(snake_case ) )


if __name__ == "__main__":
    fire.Fire(minify)
import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class __UpperCamelCase : """simple docstring""" def __init__( self : int , _A : str = "cpu" , _A : str = "openai/clip-vit-large-patch14" ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = device __SCREAMING_SNAKE_CASE : int = CLIPTokenizerFast.from_pretrained(_A ) __SCREAMING_SNAKE_CASE : int = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] __SCREAMING_SNAKE_CASE : Optional[int] = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] __SCREAMING_SNAKE_CASE : Optional[int] = torchvision.transforms.Normalize(self.image_mean , self.image_std ) __SCREAMING_SNAKE_CASE : List[str] = torchvision.transforms.Resize(224 ) __SCREAMING_SNAKE_CASE : int = torchvision.transforms.CenterCrop(224 ) def UpperCAmelCase__ ( self : Any , _A : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.resize(_A ) __SCREAMING_SNAKE_CASE : Any = self.center_crop(_A ) __SCREAMING_SNAKE_CASE : List[Any] = self.normalize(_A ) return images def __call__( self : int , _A : Dict=None , _A : str=None , **_A : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(text=_A , **_A ) __SCREAMING_SNAKE_CASE : List[Any] = self.preprocess_img(_A ) __SCREAMING_SNAKE_CASE : str = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class __UpperCamelCase ( nn.Module ): """simple docstring""" def __init__( self : Any , _A : Any=10 , _A : Tuple=0.01 , _A : int=None , _A : Any=None , _A : str=None , _A : str=None , _A : Union[str, Any]=None , _A : int=None , _A : str=False , _A : int=True , _A : str="image" , _A : Union[str, Any]=True , _A : Tuple=False , _A : str=False , _A : Dict=False , ): """simple docstring""" super().__init__() __SCREAMING_SNAKE_CASE : List[str] = None __SCREAMING_SNAKE_CASE : Optional[Any] = device if device else get_device() if vqgan: __SCREAMING_SNAKE_CASE : Any = vqgan else: __SCREAMING_SNAKE_CASE : Optional[Any] = load_vqgan(self.device , conf_path=_A , ckpt_path=_A ) self.vqgan.eval() if clip: __SCREAMING_SNAKE_CASE : int = clip else: __SCREAMING_SNAKE_CASE : Optional[Any] = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' ) self.clip.to(self.device ) __SCREAMING_SNAKE_CASE : Any = ProcessorGradientFlow(device=self.device ) __SCREAMING_SNAKE_CASE : Any = iterations __SCREAMING_SNAKE_CASE : List[str] = lr __SCREAMING_SNAKE_CASE : List[str] = log __SCREAMING_SNAKE_CASE : List[str] = make_grid __SCREAMING_SNAKE_CASE : Optional[Any] = return_val __SCREAMING_SNAKE_CASE : Optional[Any] = quantize __SCREAMING_SNAKE_CASE : Any = self.vqgan.decoder.z_shape def UpperCAmelCase__ ( self : List[str] , _A : int=None , _A : str=None , _A : str=5 , _A : str=True ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = [] if output_path is None: __SCREAMING_SNAKE_CASE : Dict = '''./animation.gif''' if input_path is None: __SCREAMING_SNAKE_CASE : Optional[int] = self.save_path __SCREAMING_SNAKE_CASE : List[Any] = sorted(glob(input_path + '''/*''' ) ) if not len(_A ): raise ValueError( '''No images found in save path, aborting (did you pass save_intermediate=True to the generate''' ''' function?)''' ) if len(_A ) == 1: print('''Only one image found in save path, (did 
you pass save_intermediate=True to the generate function?)''' ) __SCREAMING_SNAKE_CASE : Tuple = total_duration / len(_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = [frame_duration] * len(_A ) if extend_frames: __SCREAMING_SNAKE_CASE : List[Any] = 1.5 __SCREAMING_SNAKE_CASE : Optional[int] = 3 for file_name in paths: if file_name.endswith('''.png''' ): images.append(imageio.imread(_A ) ) imageio.mimsave(_A , _A , duration=_A ) print(F'''gif saved to {output_path}''' ) def UpperCAmelCase__ ( self : List[Any] , _A : Dict=None , _A : Tuple=None ): """simple docstring""" if not (path or img): raise ValueError('''Input either path or tensor''' ) if img is not None: raise NotImplementedError __SCREAMING_SNAKE_CASE : Union[str, Any] = preprocess(Image.open(_A ) , target_image_size=256 ).to(self.device ) __SCREAMING_SNAKE_CASE : Tuple = preprocess_vqgan(_A ) __SCREAMING_SNAKE_CASE, *__SCREAMING_SNAKE_CASE : List[str] = self.vqgan.encode(_A ) return z def UpperCAmelCase__ ( self : Optional[Any] , _A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = self.latent.detach().requires_grad_() __SCREAMING_SNAKE_CASE : Any = base_latent + transform_vector if self.quantize: __SCREAMING_SNAKE_CASE, *__SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.quantize(_A ) else: __SCREAMING_SNAKE_CASE : Tuple = trans_latent return self.vqgan.decode(_A ) def UpperCAmelCase__ ( self : List[Any] , _A : Tuple , _A : Dict , _A : Optional[Any]=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = self.clip_preprocessor(text=_A , images=_A , return_tensors='''pt''' , padding=_A ) __SCREAMING_SNAKE_CASE : str = self.clip(**_A ) __SCREAMING_SNAKE_CASE : List[str] = clip_outputs.logits_per_image if weights is not None: __SCREAMING_SNAKE_CASE : Optional[Any] = similarity_logits * weights return similarity_logits.sum() def UpperCAmelCase__ ( self : Tuple , _A : List[Any] , _A : List[Any] , _A : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self._get_clip_similarity(pos_prompts['''prompts'''] , _A , weights=(1 / pos_prompts['''weights''']) ) if neg_prompts: __SCREAMING_SNAKE_CASE : Tuple = self._get_clip_similarity(neg_prompts['''prompts'''] , _A , weights=neg_prompts['''weights'''] ) else: __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([1] , device=self.device ) __SCREAMING_SNAKE_CASE : Dict = -torch.log(_A ) + torch.log(_A ) return loss def UpperCAmelCase__ ( self : int , _A : str , _A : List[Any] , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = torch.randn_like(self.latent , requires_grad=_A , device=self.device ) __SCREAMING_SNAKE_CASE : List[Any] = torch.optim.Adam([vector] , lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() __SCREAMING_SNAKE_CASE : List[Any] = self._add_vector(_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = loop_post_process(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = self._get_CLIP_loss(_A , _A , _A ) print('''CLIP loss''' , _A ) if self.log: wandb.log({'''CLIP Loss''': clip_loss} ) clip_loss.backward(retain_graph=_A ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def UpperCAmelCase__ ( self : str , _A : List[str] , _A : int , _A : List[Any] ): """simple docstring""" wandb.init(reinit=_A , project='''face-editor''' ) wandb.config.update({'''Positive Prompts''': positive_prompts} ) wandb.config.update({'''Negative Prompts''': negative_prompts} ) wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} ) if 
image_path: __SCREAMING_SNAKE_CASE : Dict = Image.open(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = image.resize((256, 256) ) wandb.log('''Original Image''' , wandb.Image(_A ) ) def UpperCAmelCase__ ( self : Union[str, Any] , _A : Optional[int] ): """simple docstring""" if not prompts: return [] __SCREAMING_SNAKE_CASE : Dict = [] __SCREAMING_SNAKE_CASE : Tuple = [] if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : int = [prompt.strip() for prompt in prompts.split('''|''' )] for prompt in prompts: if isinstance(_A , (tuple, list) ): __SCREAMING_SNAKE_CASE : Union[str, Any] = prompt[0] __SCREAMING_SNAKE_CASE : int = float(prompt[1] ) elif ":" in prompt: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = prompt.split(''':''' ) __SCREAMING_SNAKE_CASE : str = float(_A ) else: __SCREAMING_SNAKE_CASE : int = prompt __SCREAMING_SNAKE_CASE : List[Any] = 1.0 processed_prompts.append(_A ) weights.append(_A ) return { "prompts": processed_prompts, "weights": torch.tensor(_A , device=self.device ), } def UpperCAmelCase__ ( self : Optional[int] , _A : List[str] , _A : List[Any]=None , _A : Tuple=None , _A : Dict=True , _A : int=False , _A : List[Any]=True , _A : Tuple=True , _A : int=None , ): """simple docstring""" if image_path: __SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_latent(_A ) else: __SCREAMING_SNAKE_CASE : Optional[Any] = torch.randn(self.latent_dim , device=self.device ) if self.log: self._init_logging(_A , _A , _A ) assert pos_prompts, "You must provide at least one positive prompt." __SCREAMING_SNAKE_CASE : Tuple = self.process_prompts(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = self.process_prompts(_A ) if save_final and save_path is None: __SCREAMING_SNAKE_CASE : List[str] = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) ) if not os.path.exists(_A ): os.makedirs(_A ) else: __SCREAMING_SNAKE_CASE : str = save_path + '''_''' + get_timestamp() os.makedirs(_A ) __SCREAMING_SNAKE_CASE : Dict = save_path __SCREAMING_SNAKE_CASE : str = self.vqgan.decode(self.latent )[0] if show_intermediate: print('''Original Image''' ) show_pil(custom_to_pil(_A ) ) __SCREAMING_SNAKE_CASE : str = loop_post_process(_A ) for iter, transformed_img in enumerate(self._optimize_CLIP(_A , _A , _A ) ): if show_intermediate: show_pil(_A ) if save_intermediate: transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}.png''' ) ) if self.log: wandb.log({'''Image''': wandb.Image(_A )} ) if show_final: show_pil(_A ) if save_final: transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}_final.png''' ) )
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class __UpperCamelCase ( unittest.TestCase ):
    """simple docstring"""

    def UpperCAmelCase__ ( self : str ):
        """simple docstring"""
        __SCREAMING_SNAKE_CASE : Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]]
        __SCREAMING_SNAKE_CASE : Tuple = DisjunctiveConstraint(_A )
        self.assertTrue(isinstance(dc.token_ids , _A ) )

        with self.assertRaises(_A ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )

        with self.assertRaises(_A ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """simple docstring"""
        __SCREAMING_SNAKE_CASE : Dict = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(_A ):
            DisjunctiveConstraint(_A )  # fails here

    def UpperCAmelCase__ ( self : Optional[int] ):
        """simple docstring"""
        __SCREAMING_SNAKE_CASE : Dict = [[1, 2, 3], [1, 2, 4]]
        __SCREAMING_SNAKE_CASE : Optional[Any] = DisjunctiveConstraint(_A )

        __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = dc.update(1 )
        __SCREAMING_SNAKE_CASE : int = stepped is True and completed is False and reset is False
        self.assertTrue(_A )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )

        __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dc.update(2 )
        __SCREAMING_SNAKE_CASE : Optional[Any] = stepped is True and completed is False and reset is False
        self.assertTrue(_A )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )

        __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = dc.update(3 )
        __SCREAMING_SNAKE_CASE : Union[str, Any] = stepped is True and completed is True and reset is False
        self.assertTrue(_A )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )

    def UpperCAmelCase__ ( self : str ):
        """simple docstring"""
        __SCREAMING_SNAKE_CASE : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        __SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(_A )

        __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )

        __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )

        __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )

        __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )

        dc.reset()

        __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )

        __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )

        __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""", # See all WavLM models at https://huggingface.co/models?filter=wavlm } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''wavlm''' def __init__( self : Optional[Any] , _A : List[Any]=32 , _A : List[Any]=768 , _A : Optional[int]=12 , _A : Optional[int]=12 , _A : List[str]=3072 , _A : List[Any]="gelu" , _A : Tuple=0.1 , _A : Dict=0.1 , _A : int=0.1 , _A : Optional[int]=0.0 , _A : Optional[int]=0.1 , _A : Any=0.1 , _A : List[Any]=0.02 , _A : Tuple=1e-5 , _A : Union[str, Any]="group" , _A : Optional[Any]="gelu" , _A : List[str]=(512, 512, 512, 512, 512, 512, 512) , _A : List[str]=(5, 2, 2, 2, 2, 2, 2) , _A : str=(10, 3, 3, 3, 3, 2, 2) , _A : Union[str, Any]=False , _A : Union[str, Any]=128 , _A : str=16 , _A : Union[str, Any]=320 , _A : Optional[int]=800 , _A : Dict=False , _A : Optional[int]=True , _A : Any=0.05 , _A : List[Any]=10 , _A : Tuple=2 , _A : Tuple=0.0 , _A : str=10 , _A : str=320 , _A : Any=2 , _A : Optional[Any]=0.1 , _A : Optional[int]=100 , _A : Union[str, Any]=256 , _A : Optional[Any]=256 , _A : Tuple=0.1 , _A : Union[str, Any]="mean" , _A : str=False , _A : Any=False , _A : List[str]=256 , _A : str=(512, 512, 512, 512, 1500) , _A : Any=(5, 3, 3, 1, 1) , _A : List[str]=(1, 2, 3, 1, 1) , _A : List[Any]=512 , _A : List[Any]=80 , _A : Optional[Any]=0 , _A : str=1 , _A : str=2 , _A : Union[str, Any]=False , _A : List[str]=3 , _A : Any=2 , _A : str=3 , _A : Optional[Any]=None , **_A : Any , ): """simple docstring""" super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A ) __SCREAMING_SNAKE_CASE : Tuple = hidden_size __SCREAMING_SNAKE_CASE : int = feat_extract_norm __SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_activation __SCREAMING_SNAKE_CASE : Dict = list(_A ) __SCREAMING_SNAKE_CASE : int = list(_A ) __SCREAMING_SNAKE_CASE : Dict = list(_A ) __SCREAMING_SNAKE_CASE : Dict = conv_bias __SCREAMING_SNAKE_CASE : int = num_buckets __SCREAMING_SNAKE_CASE : List[Any] = max_bucket_distance __SCREAMING_SNAKE_CASE : List[Any] = num_conv_pos_embeddings __SCREAMING_SNAKE_CASE : str = num_conv_pos_embedding_groups __SCREAMING_SNAKE_CASE : Optional[int] = len(self.conv_dim ) __SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers __SCREAMING_SNAKE_CASE : int = intermediate_size __SCREAMING_SNAKE_CASE : Tuple = hidden_act __SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads __SCREAMING_SNAKE_CASE : str = hidden_dropout __SCREAMING_SNAKE_CASE : List[Any] = attention_dropout __SCREAMING_SNAKE_CASE : List[Any] = activation_dropout __SCREAMING_SNAKE_CASE : Any = feat_proj_dropout __SCREAMING_SNAKE_CASE : Union[str, Any] = final_dropout __SCREAMING_SNAKE_CASE : List[str] = layerdrop __SCREAMING_SNAKE_CASE : str = layer_norm_eps __SCREAMING_SNAKE_CASE : Optional[int] = initializer_range __SCREAMING_SNAKE_CASE : Optional[Any] = num_ctc_classes __SCREAMING_SNAKE_CASE : Dict = vocab_size __SCREAMING_SNAKE_CASE : Optional[int] = do_stable_layer_norm __SCREAMING_SNAKE_CASE : str = use_weighted_layer_sum __SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise 
ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __SCREAMING_SNAKE_CASE : Tuple = apply_spec_augment __SCREAMING_SNAKE_CASE : Tuple = mask_time_prob __SCREAMING_SNAKE_CASE : int = mask_time_length __SCREAMING_SNAKE_CASE : Optional[int] = mask_time_min_masks __SCREAMING_SNAKE_CASE : Optional[int] = mask_feature_prob __SCREAMING_SNAKE_CASE : Optional[Any] = mask_feature_length # parameters for pretraining with codevector quantized representations __SCREAMING_SNAKE_CASE : Optional[int] = num_codevectors_per_group __SCREAMING_SNAKE_CASE : Dict = num_codevector_groups __SCREAMING_SNAKE_CASE : Tuple = contrastive_logits_temperature __SCREAMING_SNAKE_CASE : int = num_negatives __SCREAMING_SNAKE_CASE : Union[str, Any] = codevector_dim __SCREAMING_SNAKE_CASE : List[str] = proj_codevector_dim __SCREAMING_SNAKE_CASE : Tuple = diversity_loss_weight # ctc loss __SCREAMING_SNAKE_CASE : str = ctc_loss_reduction __SCREAMING_SNAKE_CASE : Tuple = ctc_zero_infinity # adapter __SCREAMING_SNAKE_CASE : Optional[int] = add_adapter __SCREAMING_SNAKE_CASE : Dict = adapter_kernel_size __SCREAMING_SNAKE_CASE : Union[str, Any] = adapter_stride __SCREAMING_SNAKE_CASE : List[str] = num_adapter_layers __SCREAMING_SNAKE_CASE : Optional[int] = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. __SCREAMING_SNAKE_CASE : Optional[int] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. __SCREAMING_SNAKE_CASE : List[Any] = list(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = list(_A ) __SCREAMING_SNAKE_CASE : Tuple = list(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = xvector_output_dim @property def UpperCAmelCase__ ( self : int ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowercase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") lowercase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) lowercase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''A folder containing the training data.'''} ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''A folder containing the validation data.'''} ) lowerCAmelCase_ = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) lowerCAmelCase_ = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) lowerCAmelCase_ = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = {} if self.train_dir is not None: __SCREAMING_SNAKE_CASE : Dict = self.train_dir if self.validation_dir is not None: __SCREAMING_SNAKE_CASE : Any = self.validation_dir __SCREAMING_SNAKE_CASE : List[Any] = data_files if data_files else None @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(lowerCAmelCase__ )} , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) lowerCAmelCase_ = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class __UpperCamelCase : """simple docstring""" def __init__( self : Tuple , _A : Optional[int]=192 , _A : List[Any]=32 , _A : Optional[int]=4 , _A : str=0.6 ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = input_size __SCREAMING_SNAKE_CASE : List[str] = mask_patch_size __SCREAMING_SNAKE_CASE : Dict = model_patch_size __SCREAMING_SNAKE_CASE : int = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError('''Input size must be divisible by mask patch size''' ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError('''Mask patch size must be divisible by model patch size''' ) __SCREAMING_SNAKE_CASE : Any = self.input_size // self.mask_patch_size __SCREAMING_SNAKE_CASE : Optional[Any] = self.mask_patch_size // self.model_patch_size __SCREAMING_SNAKE_CASE : int = self.rand_size**2 __SCREAMING_SNAKE_CASE : Optional[int] = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = np.random.permutation(self.token_count )[: self.mask_count] __SCREAMING_SNAKE_CASE : Union[str, Any] = np.zeros(self.token_count , dtype=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = 1 __SCREAMING_SNAKE_CASE : List[str] = mask.reshape((self.rand_size, self.rand_size) ) __SCREAMING_SNAKE_CASE : List[Any] = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack([example['''pixel_values'''] for example in examples] ) __SCREAMING_SNAKE_CASE : Any = torch.stack([example['''mask'''] for example in examples] ) return {"pixel_values": pixel_values, 
"bool_masked_pos": mask} def a__ ( ): """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __SCREAMING_SNAKE_CASE : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_mim''' , snake_case , snake_case ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = training_args.get_process_log_level() logger.setLevel(snake_case ) transformers.utils.logging.set_verbosity(snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. __SCREAMING_SNAKE_CASE : Tuple = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __SCREAMING_SNAKE_CASE : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset. __SCREAMING_SNAKE_CASE : Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. 
__SCREAMING_SNAKE_CASE : Any = None if '''validation''' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , snake_case ) and data_args.train_val_split > 0.0: __SCREAMING_SNAKE_CASE : List[str] = ds['''train'''].train_test_split(data_args.train_val_split ) __SCREAMING_SNAKE_CASE : int = split['''train'''] __SCREAMING_SNAKE_CASE : Dict = split['''test'''] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __SCREAMING_SNAKE_CASE : List[Any] = { '''cache_dir''': model_args.cache_dir, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.config_name_or_path: __SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(model_args.config_name_or_path , **snake_case ) elif model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **snake_case ) else: __SCREAMING_SNAKE_CASE : List[Any] = CONFIG_MAPPING[model_args.model_type]() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(snake_case , '''decoder_type''' ): __SCREAMING_SNAKE_CASE : Any = '''simmim''' # adapt config __SCREAMING_SNAKE_CASE : str = model_args.image_size if model_args.image_size is not None else config.image_size __SCREAMING_SNAKE_CASE : int = model_args.patch_size if model_args.patch_size is not None else config.patch_size __SCREAMING_SNAKE_CASE : str = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { '''image_size''': model_args.image_size, '''patch_size''': model_args.patch_size, '''encoder_stride''': model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: __SCREAMING_SNAKE_CASE : int = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case ) elif model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case ) else: __SCREAMING_SNAKE_CASE : List[Any] = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } __SCREAMING_SNAKE_CASE : str = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : int = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('''Training new model from scratch''' ) __SCREAMING_SNAKE_CASE : List[Any] = AutoModelForMaskedImageModeling.from_config(snake_case ) if training_args.do_train: __SCREAMING_SNAKE_CASE : Any = ds['''train'''].column_names else: __SCREAMING_SNAKE_CASE : int = ds['''validation'''].column_names if data_args.image_column_name is not None: __SCREAMING_SNAKE_CASE : List[Any] = data_args.image_column_name elif "image" in column_names: __SCREAMING_SNAKE_CASE : str = '''image''' elif "img" in column_names: 
__SCREAMING_SNAKE_CASE : List[str] = '''img''' else: __SCREAMING_SNAKE_CASE : Tuple = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py __SCREAMING_SNAKE_CASE : Any = Compose( [ Lambda(lambda snake_case : img.convert('''RGB''' ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator __SCREAMING_SNAKE_CASE : str = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(snake_case ): __SCREAMING_SNAKE_CASE : str = [transforms(snake_case ) for image in examples[image_column_name]] __SCREAMING_SNAKE_CASE : str = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError('''--do_train requires a train dataset''' ) if data_args.max_train_samples is not None: __SCREAMING_SNAKE_CASE : Dict = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError('''--do_eval requires a validation dataset''' ) if data_args.max_eval_samples is not None: __SCREAMING_SNAKE_CASE : Union[str, Any] = ( ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(snake_case ) # Initialize our trainer __SCREAMING_SNAKE_CASE : List[str] = Trainer( model=snake_case , args=snake_case , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=snake_case , data_collator=snake_case , ) # Training if training_args.do_train: __SCREAMING_SNAKE_CASE : Union[str, Any] = None if training_args.resume_from_checkpoint is not None: __SCREAMING_SNAKE_CASE : Tuple = training_args.resume_from_checkpoint elif last_checkpoint is not None: __SCREAMING_SNAKE_CASE : int = last_checkpoint __SCREAMING_SNAKE_CASE : Tuple = trainer.train(resume_from_checkpoint=snake_case ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: __SCREAMING_SNAKE_CASE : Union[str, Any] = trainer.evaluate() trainer.log_metrics('''eval''' , snake_case ) trainer.save_metrics('''eval''' , snake_case ) # Write model card and (optionally) push to hub __SCREAMING_SNAKE_CASE : Optional[Any] = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''masked-image-modeling''', '''dataset''': data_args.dataset_name, '''tags''': ['''masked-image-modeling'''], } if training_args.push_to_hub: trainer.push_to_hub(**snake_case ) else: trainer.create_model_card(**snake_case ) if __name__ == "__main__": main()
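A minimal usage sketch for the mask generator defined in the script above (assuming the obfuscated class is the MaskGenerator that main() instantiates, with its intended assignments restored; numpy and torch as in the script). With the defaults (input size 192, mask patch size 32, model patch size 4, mask ratio 0.6) it masks ceil(36 * 0.6) = 22 of the 6x6 mask patches and upsamples them to the 48x48 grid of model patches:

gen = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
mask = gen()                            # 1-D tensor, one entry per model patch
assert mask.numel() == 48 * 48          # (192 / 4) ** 2 model patches
assert int(mask.sum()) == 22 * 8 * 8    # each masked 32px patch covers 8x8 model patches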
74
1
from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""", } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''mgp-str''' def __init__( self : Optional[Any] , _A : Tuple=[32, 128] , _A : List[Any]=4 , _A : str=3 , _A : Union[str, Any]=27 , _A : Optional[int]=38 , _A : Optional[int]=5_0257 , _A : Optional[int]=3_0522 , _A : Tuple=768 , _A : Any=12 , _A : str=12 , _A : int=4.0 , _A : int=True , _A : List[Any]=False , _A : Optional[Any]=1e-5 , _A : str=0.0 , _A : List[Any]=0.0 , _A : Optional[int]=0.0 , _A : Any=False , _A : Optional[Any]=0.02 , **_A : List[Any] , ): """simple docstring""" super().__init__(**_A ) __SCREAMING_SNAKE_CASE : List[Any] = image_size __SCREAMING_SNAKE_CASE : str = patch_size __SCREAMING_SNAKE_CASE : List[Any] = num_channels __SCREAMING_SNAKE_CASE : Tuple = max_token_length __SCREAMING_SNAKE_CASE : Dict = num_character_labels __SCREAMING_SNAKE_CASE : Any = num_bpe_labels __SCREAMING_SNAKE_CASE : Any = num_wordpiece_labels __SCREAMING_SNAKE_CASE : Optional[int] = hidden_size __SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers __SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads __SCREAMING_SNAKE_CASE : List[Any] = mlp_ratio __SCREAMING_SNAKE_CASE : Union[str, Any] = distilled __SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps __SCREAMING_SNAKE_CASE : List[Any] = drop_rate __SCREAMING_SNAKE_CASE : Dict = qkv_bias __SCREAMING_SNAKE_CASE : List[Any] = attn_drop_rate __SCREAMING_SNAKE_CASE : Tuple = drop_path_rate __SCREAMING_SNAKE_CASE : Any = output_aa_attentions __SCREAMING_SNAKE_CASE : Tuple = initializer_range
74
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """facebook/data2vec-vision-base-ft""": ( """https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json""" ), } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''data2vec-vision''' def __init__( self : Optional[int] , _A : List[Any]=768 , _A : Any=12 , _A : str=12 , _A : Union[str, Any]=3072 , _A : Union[str, Any]="gelu" , _A : List[Any]=0.0 , _A : Dict=0.0 , _A : Dict=0.02 , _A : Any=1e-12 , _A : Optional[Any]=224 , _A : Union[str, Any]=16 , _A : Tuple=3 , _A : List[Any]=False , _A : List[str]=False , _A : Dict=False , _A : Dict=False , _A : Any=0.1 , _A : List[str]=0.1 , _A : Dict=True , _A : Dict=[3, 5, 7, 11] , _A : Union[str, Any]=[1, 2, 3, 6] , _A : Optional[Any]=True , _A : Any=0.4 , _A : List[str]=256 , _A : Any=1 , _A : Any=False , _A : Union[str, Any]=255 , **_A : Tuple , ): """simple docstring""" super().__init__(**_A ) __SCREAMING_SNAKE_CASE : Any = hidden_size __SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers __SCREAMING_SNAKE_CASE : Tuple = num_attention_heads __SCREAMING_SNAKE_CASE : List[Any] = intermediate_size __SCREAMING_SNAKE_CASE : Tuple = hidden_act __SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob __SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : List[Any] = initializer_range __SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps __SCREAMING_SNAKE_CASE : Any = image_size __SCREAMING_SNAKE_CASE : Optional[int] = patch_size __SCREAMING_SNAKE_CASE : Any = num_channels __SCREAMING_SNAKE_CASE : List[str] = use_mask_token __SCREAMING_SNAKE_CASE : List[Any] = use_absolute_position_embeddings __SCREAMING_SNAKE_CASE : Dict = use_relative_position_bias __SCREAMING_SNAKE_CASE : str = use_shared_relative_position_bias __SCREAMING_SNAKE_CASE : Union[str, Any] = layer_scale_init_value __SCREAMING_SNAKE_CASE : str = drop_path_rate __SCREAMING_SNAKE_CASE : Tuple = use_mean_pooling # decode head attributes (semantic segmentation) __SCREAMING_SNAKE_CASE : str = out_indices __SCREAMING_SNAKE_CASE : List[str] = pool_scales # auxiliary head attributes (semantic segmentation) __SCREAMING_SNAKE_CASE : Tuple = use_auxiliary_head __SCREAMING_SNAKE_CASE : Optional[Any] = auxiliary_loss_weight __SCREAMING_SNAKE_CASE : Union[str, Any] = auxiliary_channels __SCREAMING_SNAKE_CASE : List[Any] = auxiliary_num_convs __SCREAMING_SNAKE_CASE : Optional[Any] = auxiliary_concat_input __SCREAMING_SNAKE_CASE : Any = semantic_loss_ignore_index class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = version.parse('''1.11''' ) @property def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return 1e-4
74
1
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = SwinvaConfig() __SCREAMING_SNAKE_CASE : Optional[Any] = swinva_name.split('''_''' ) __SCREAMING_SNAKE_CASE : Dict = name_split[1] if "to" in name_split[3]: __SCREAMING_SNAKE_CASE : int = int(name_split[3][-3:] ) else: __SCREAMING_SNAKE_CASE : str = int(name_split[3] ) if "to" in name_split[2]: __SCREAMING_SNAKE_CASE : Any = int(name_split[2][-2:] ) else: __SCREAMING_SNAKE_CASE : Optional[Any] = int(name_split[2][6:] ) if model_size == "tiny": __SCREAMING_SNAKE_CASE : int = 96 __SCREAMING_SNAKE_CASE : int = (2, 2, 6, 2) __SCREAMING_SNAKE_CASE : Tuple = (3, 6, 12, 24) elif model_size == "small": __SCREAMING_SNAKE_CASE : Any = 96 __SCREAMING_SNAKE_CASE : Any = (2, 2, 18, 2) __SCREAMING_SNAKE_CASE : Optional[int] = (3, 6, 12, 24) elif model_size == "base": __SCREAMING_SNAKE_CASE : Optional[Any] = 128 __SCREAMING_SNAKE_CASE : Optional[Any] = (2, 2, 18, 2) __SCREAMING_SNAKE_CASE : Tuple = (4, 8, 16, 32) else: __SCREAMING_SNAKE_CASE : Any = 192 __SCREAMING_SNAKE_CASE : Tuple = (2, 2, 18, 2) __SCREAMING_SNAKE_CASE : Optional[int] = (6, 12, 24, 48) if "to" in swinva_name: __SCREAMING_SNAKE_CASE : Optional[Any] = (12, 12, 12, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): __SCREAMING_SNAKE_CASE : int = 21_841 __SCREAMING_SNAKE_CASE : List[Any] = '''huggingface/label-files''' __SCREAMING_SNAKE_CASE : List[str] = '''imagenet-22k-id2label.json''' __SCREAMING_SNAKE_CASE : int = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='''dataset''' ) , '''r''' ) ) __SCREAMING_SNAKE_CASE : Tuple = {int(snake_case ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : Union[str, Any] = idalabel __SCREAMING_SNAKE_CASE : List[str] = {v: k for k, v in idalabel.items()} else: __SCREAMING_SNAKE_CASE : Tuple = 1_000 __SCREAMING_SNAKE_CASE : str = '''huggingface/label-files''' __SCREAMING_SNAKE_CASE : Union[str, Any] = '''imagenet-1k-id2label.json''' __SCREAMING_SNAKE_CASE : Union[str, Any] = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='''dataset''' ) , '''r''' ) ) __SCREAMING_SNAKE_CASE : str = {int(snake_case ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : int = idalabel __SCREAMING_SNAKE_CASE : Optional[int] = {v: k for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : int = img_size __SCREAMING_SNAKE_CASE : Tuple = num_classes __SCREAMING_SNAKE_CASE : str = embed_dim __SCREAMING_SNAKE_CASE : Optional[int] = depths __SCREAMING_SNAKE_CASE : str = num_heads __SCREAMING_SNAKE_CASE : Dict = window_size return config def a__ ( snake_case ): """simple docstring""" if "patch_embed.proj" in name: __SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: __SCREAMING_SNAKE_CASE : int = name.replace('''patch_embed.norm''' , '''embeddings.norm''' ) if "layers" in name: __SCREAMING_SNAKE_CASE : str = '''encoder.''' + name if "attn.proj" in name: __SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''norm1''' , 
'''layernorm_before''' ) if "norm2" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: __SCREAMING_SNAKE_CASE : List[Any] = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: __SCREAMING_SNAKE_CASE : int = name.replace('''mlp.fc2''' , '''output.dense''' ) if "q_bias" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''q_bias''' , '''query.bias''' ) if "k_bias" in name: __SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''k_bias''' , '''key.bias''' ) if "v_bias" in name: __SCREAMING_SNAKE_CASE : Any = name.replace('''v_bias''' , '''value.bias''' ) if "cpb_mlp" in name: __SCREAMING_SNAKE_CASE : List[Any] = name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' ) if name == "norm.weight": __SCREAMING_SNAKE_CASE : Any = '''layernorm.weight''' if name == "norm.bias": __SCREAMING_SNAKE_CASE : int = '''layernorm.bias''' if "head" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''head''' , '''classifier''' ) else: __SCREAMING_SNAKE_CASE : List[str] = '''swinv2.''' + name return name def a__ ( snake_case , snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): __SCREAMING_SNAKE_CASE : str = orig_state_dict.pop(snake_case ) if "mask" in key: continue elif "qkv" in key: __SCREAMING_SNAKE_CASE : Dict = key.split('''.''' ) __SCREAMING_SNAKE_CASE : List[Any] = int(key_split[1] ) __SCREAMING_SNAKE_CASE : Optional[int] = int(key_split[3] ) __SCREAMING_SNAKE_CASE : Optional[int] = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: __SCREAMING_SNAKE_CASE : Optional[Any] = val[:dim, :] __SCREAMING_SNAKE_CASE : Any = val[dim : dim * 2, :] __SCREAMING_SNAKE_CASE : Dict = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Tuple = val[:dim] __SCREAMING_SNAKE_CASE : Any = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : List[str] = val[-dim:] else: __SCREAMING_SNAKE_CASE : str = val return orig_state_dict def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = timm.create_model(snake_case , pretrained=snake_case ) timm_model.eval() __SCREAMING_SNAKE_CASE : Optional[Any] = get_swinva_config(snake_case ) __SCREAMING_SNAKE_CASE : Optional[int] = SwinvaForImageClassification(snake_case ) model.eval() __SCREAMING_SNAKE_CASE : List[str] = convert_state_dict(timm_model.state_dict() , snake_case ) model.load_state_dict(snake_case ) __SCREAMING_SNAKE_CASE : Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinva_name.replace('''_''' , '''-''' ) ) ) __SCREAMING_SNAKE_CASE : Any = Image.open(requests.get(snake_case , stream=snake_case ).raw ) __SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=snake_case , return_tensors='''pt''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = timm_model(inputs['''pixel_values'''] ) __SCREAMING_SNAKE_CASE : Dict = model(**snake_case ).logits assert torch.allclose(snake_case , snake_case , atol=1E-3 ) print(F'''Saving model {swinva_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(snake_case ) model.push_to_hub( repo_path_or_name=Path(snake_case , snake_case ) , organization='''nandwalritik''' , commit_message='''Add model''' , ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required 
parameters parser.add_argument( """--swinv2_name""", default="""swinv2_tiny_patch4_window8_256""", type=str, help="""Name of the Swinv2 timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) lowercase_ = parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
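As a usage sketch (the script file name below is illustrative, not taken from the source), a single checkpoint can be converted and verified against the timm reference with:

python convert_swinv2_original_to_pytorch.py --swinv2_name swinv2_tiny_patch4_window8_256 --pytorch_dump_folder_path ./swinv2-tiny-patch4-window8-256

The --swinv2_name value must be a valid timm model id so that timm.create_model can load the reference weights; the two models' logits are compared with torch.allclose at an absolute tolerance of 1e-3 before anything is saved, and note that the script also attempts to push the converted model to the hub at the end.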
74
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[str] , _A : Optional[int] , _A : Optional[Any]=13 , _A : List[Any]=7 , _A : List[str]=True , _A : Dict=True , _A : Tuple=False , _A : Union[str, Any]=True , _A : List[str]=99 , _A : Union[str, Any]=32 , _A : str=5 , _A : Union[str, Any]=4 , _A : int=37 , _A : int="gelu" , _A : Tuple=0.1 , _A : Dict=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : List[Any]=2 , _A : List[Any]=0.02 , _A : Any=3 , _A : Optional[int]=4 , _A : Optional[int]=None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = parent __SCREAMING_SNAKE_CASE : Optional[int] = batch_size __SCREAMING_SNAKE_CASE : str = seq_length __SCREAMING_SNAKE_CASE : int = is_training __SCREAMING_SNAKE_CASE : Union[str, Any] = use_input_mask __SCREAMING_SNAKE_CASE : str = use_token_type_ids __SCREAMING_SNAKE_CASE : Any = use_labels __SCREAMING_SNAKE_CASE : Any = vocab_size __SCREAMING_SNAKE_CASE : Optional[int] = hidden_size __SCREAMING_SNAKE_CASE : Any = num_hidden_layers __SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads __SCREAMING_SNAKE_CASE : List[str] = intermediate_size __SCREAMING_SNAKE_CASE : List[str] = hidden_act __SCREAMING_SNAKE_CASE : int = hidden_dropout_prob __SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings __SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size __SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size __SCREAMING_SNAKE_CASE : int = initializer_range __SCREAMING_SNAKE_CASE : List[Any] = num_labels __SCREAMING_SNAKE_CASE : List[Any] = num_choices __SCREAMING_SNAKE_CASE : Union[str, Any] = scope def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_input_mask: __SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE : Any = None __SCREAMING_SNAKE_CASE : Union[str, Any] = None __SCREAMING_SNAKE_CASE : int = None if self.use_labels: __SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE : Dict = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , 
hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : Optional[int] , _A : int , _A : Union[str, Any] , _A : List[str] , _A : Dict , _A : Dict , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , _A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Tuple , _A : Dict , _A : Tuple , _A : str , _A : Optional[int] , _A : List[str] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForMaskedLM(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Tuple = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Dict , _A : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any] , _A : Optional[Any] , _A : str , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForQuestionAnswering(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : int = model( _A , attention_mask=_A , start_positions=_A , end_positions=_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : Dict , _A : List[str] , _A : Tuple , _A : str , _A : Tuple , _A : Optional[int] , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels __SCREAMING_SNAKE_CASE : List[Any] = DistilBertForSequenceClassification(_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : List[str] , _A : int , _A : List[Any] , _A : Any , _A : Any , _A : str , _A : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForTokenClassification(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Dict , _A : Optional[int] , _A : int , _A : Optional[int] , _A : List[Any] , _A : int , _A : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.num_choices __SCREAMING_SNAKE_CASE : int = DistilBertForMultipleChoice(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE : Optional[Any] = model( _A , attention_mask=_A , labels=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs() ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), 
(__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : List[Any] = config_and_inputs __SCREAMING_SNAKE_CASE : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) lowerCAmelCase_ = ( { '''feature-extraction''': DistilBertModel, '''fill-mask''': DistilBertForMaskedLM, '''question-answering''': DistilBertForQuestionAnswering, '''text-classification''': DistilBertForSequenceClassification, '''token-classification''': DistilBertForTokenClassification, '''zero-shot''': DistilBertForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = True def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = DistilBertModelTester(self ) __SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=_A , dim=37 ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*_A ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*_A ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*_A ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*_A ) @slow def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @slow @require_torch_gpu def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == DistilBertForMultipleChoice: return __SCREAMING_SNAKE_CASE : Dict = True __SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(config=_A ) __SCREAMING_SNAKE_CASE : int = self._prepare_for_class(_A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = torch.jit.trace( _A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(_A , os.path.join(_A , '''traced_model.pt''' ) ) __SCREAMING_SNAKE_CASE : Optional[int] = torch.jit.load(os.path.join(_A , '''traced_model.pt''' ) , map_location=_A ) loaded(inputs_dict['''input_ids'''].to(_A ) , inputs_dict['''attention_mask'''].to(_A ) ) @require_torch class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A , attention_mask=_A )[0] __SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , _A ) __SCREAMING_SNAKE_CASE : Any = torch.tensor( [[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1e-4 ) )
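The hard-coded expected slice in the integration test above is typically produced with a snippet like this (a sketch: run it once against a trusted revision of the weights and paste the printed values into the test):

model = DistilBertModel.from_pretrained("distilbert-base-uncased")
with torch.no_grad():
    output = model(input_ids, attention_mask=attention_mask)[0]
print(output.shape)          # torch.Size([1, 11, 768]) for the 11-token example in the test
print(output[:, 1:4, 1:4])   # the 3x3 slice that is then compared with torch.allclose(atol=1e-4)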
74
1
import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py lowercase_ = """src/diffusers""" lowercase_ = """.""" # This is to make sure the diffusers module imported is the one in the repo. lowercase_ = importlib.util.spec_from_file_location( """diffusers""", os.path.join(DIFFUSERS_PATH, """__init__.py"""), submodule_search_locations=[DIFFUSERS_PATH], ) lowercase_ = spec.loader.load_module() def a__ ( snake_case , snake_case ): """simple docstring""" return line.startswith(snake_case ) or len(snake_case ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , snake_case ) is not None def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = object_name.split('''.''' ) __SCREAMING_SNAKE_CASE : str = 0 # First let's find the module where our object lives. __SCREAMING_SNAKE_CASE : Any = parts[i] while i < len(snake_case ) and not os.path.isfile(os.path.join(snake_case , F'''{module}.py''' ) ): i += 1 if i < len(snake_case ): __SCREAMING_SNAKE_CASE : str = os.path.join(snake_case , parts[i] ) if i >= len(snake_case ): raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' ) with open(os.path.join(snake_case , F'''{module}.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : Dict = f.readlines() # Now let's find the class / func in the code! __SCREAMING_SNAKE_CASE : Union[str, Any] = '''''' __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for name in parts[i + 1 :]: while ( line_index < len(snake_case ) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(snake_case ): raise ValueError(F''' {object_name} does not match any function or class in {module}.''' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). __SCREAMING_SNAKE_CASE : List[Any] = line_index while line_index < len(snake_case ) and _should_continue(lines[line_index] , snake_case ): line_index += 1 # Clean up empty lines at the end (if any). 
while len(lines[line_index - 1] ) <= 1: line_index -= 1 __SCREAMING_SNAKE_CASE : Dict = lines[start_index:line_index] return "".join(snake_case ) lowercase_ = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""") lowercase_ = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""") lowercase_ = re.compile(R"""<FILL\s+[^>]*>""") def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = code.split('''\n''' ) __SCREAMING_SNAKE_CASE : Dict = 0 while idx < len(snake_case ) and len(lines[idx] ) == 0: idx += 1 if idx < len(snake_case ): return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0] return "" def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = len(get_indent(snake_case ) ) > 0 if has_indent: __SCREAMING_SNAKE_CASE : List[Any] = F'''class Bla:\n{code}''' __SCREAMING_SNAKE_CASE : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=snake_case ) __SCREAMING_SNAKE_CASE : Optional[int] = black.format_str(snake_case , mode=snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = style_docstrings_in_code(snake_case ) return result[len('''class Bla:\n''' ) :] if has_indent else result def a__ ( snake_case , snake_case=False ): """simple docstring""" with open(snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : List[str] = f.readlines() __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : int = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(snake_case ): __SCREAMING_SNAKE_CASE : Dict = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = search.groups() __SCREAMING_SNAKE_CASE : int = find_code_in_diffusers(snake_case ) __SCREAMING_SNAKE_CASE : str = get_indent(snake_case ) __SCREAMING_SNAKE_CASE : Any = line_index + 1 if indent == theoretical_indent else line_index + 2 __SCREAMING_SNAKE_CASE : Dict = theoretical_indent __SCREAMING_SNAKE_CASE : Optional[int] = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. __SCREAMING_SNAKE_CASE : List[Any] = True while line_index < len(snake_case ) and should_continue: line_index += 1 if line_index >= len(snake_case ): break __SCREAMING_SNAKE_CASE : Any = lines[line_index] __SCREAMING_SNAKE_CASE : Optional[Any] = _should_continue(snake_case , snake_case ) and re.search(F'''^{indent}# End copy''' , snake_case ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 __SCREAMING_SNAKE_CASE : List[str] = lines[start_index:line_index] __SCREAMING_SNAKE_CASE : Dict = ''''''.join(snake_case ) # Remove any nested `Copied from` comments to avoid circular copies __SCREAMING_SNAKE_CASE : Tuple = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(snake_case ) is None] __SCREAMING_SNAKE_CASE : Union[str, Any] = '''\n'''.join(snake_case ) # Before comparing, use the `replace_pattern` on the original code. 
if len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : Union[str, Any] = replace_pattern.replace('''with''' , '''''' ).split(''',''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = [_re_replace_pattern.search(snake_case ) for p in patterns] for pattern in patterns: if pattern is None: continue __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = pattern.groups() __SCREAMING_SNAKE_CASE : str = re.sub(snake_case , snake_case , snake_case ) if option.strip() == "all-casing": __SCREAMING_SNAKE_CASE : Optional[Any] = re.sub(obja.lower() , obja.lower() , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = re.sub(obja.upper() , obja.upper() , snake_case ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line __SCREAMING_SNAKE_CASE : Optional[Any] = blackify(lines[start_index - 1] + theoretical_code ) __SCREAMING_SNAKE_CASE : int = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: __SCREAMING_SNAKE_CASE : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:] __SCREAMING_SNAKE_CASE : str = start_index + 1 if overwrite and len(snake_case ) > 0: # Warn the user a file has been modified. print(F'''Detected changes, rewriting {filename}.''' ) with open(snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(snake_case ) return diffs def a__ ( snake_case = False ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = glob.glob(os.path.join(snake_case , '''**/*.py''' ) , recursive=snake_case ) __SCREAMING_SNAKE_CASE : Tuple = [] for filename in all_files: __SCREAMING_SNAKE_CASE : int = is_copy_consistent(snake_case , snake_case ) diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs] if not overwrite and len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : Optional[int] = '''\n'''.join(snake_case ) raise Exception( '''Found the following copy inconsistencies:\n''' + diff + '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") lowercase_ = parser.parse_args() check_copies(args.fix_and_overwrite)
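To make the regular expressions above concrete, here is a sketch of the two marker forms the checker recognises in the diffusers sources (class and attribute names are illustrative):

# Copied from diffusers.models.attention.BasicTransformerBlock
class MyBlock(nn.Module):
    ...

# Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyRenamedBlock
class MyRenamedBlock(nn.Module):
    ...

In the first form the copied body must stay identical to the object located by find_code_in_diffusers; in the second, each comma-separated A->B pair after the with keyword is substituted (plus its lower- and upper-cased variants when the all-casing option is appended), and the result is re-formatted with black before the comparison. Running the script with --fix_and_overwrite rewrites any copy that has drifted.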
74
import logging import os import threading import time try: import warnings except ImportError: lowercase_ = None try: import msvcrt except ImportError: lowercase_ = None try: import fcntl except ImportError: lowercase_ = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: lowercase_ = OSError # Data # ------------------------------------------------ lowercase_ = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] lowercase_ = """3.0.12""" lowercase_ = None def a__ ( ): """simple docstring""" global _logger __SCREAMING_SNAKE_CASE : Optional[Any] = _logger or logging.getLogger(__name__ ) return _logger class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[Any] , _A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = lock_file return None def __str__( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = F'''The file lock \'{self.lock_file}\' could not be acquired.''' return temp class __UpperCamelCase : """simple docstring""" def __init__( self : Optional[Any] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = lock return None def __enter__( self : Any ): """simple docstring""" return self.lock def __exit__( self : str , _A : Any , _A : int , _A : Any ): """simple docstring""" self.lock.release() return None class __UpperCamelCase : """simple docstring""" def __init__( self : Any , _A : int , _A : Optional[int]=-1 , _A : List[Any]=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long __SCREAMING_SNAKE_CASE : Optional[Any] = self.hash_filename_if_too_long(_A , _A ) # The path to the lock file. __SCREAMING_SNAKE_CASE : Tuple = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __SCREAMING_SNAKE_CASE : str = None # The default timeout value. __SCREAMING_SNAKE_CASE : Any = timeout # We use this lock primarily for the lock counter. __SCREAMING_SNAKE_CASE : int = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __SCREAMING_SNAKE_CASE : int = 0 return None @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._lock_file @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._timeout @timeout.setter def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = float(_A ) return None def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" raise NotImplementedError() def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" raise NotImplementedError() @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._lock_file_fd is not None def UpperCAmelCase__ ( self : Tuple , _A : List[Any]=None , _A : Optional[Any]=0.05 ): """simple docstring""" if timeout is None: __SCREAMING_SNAKE_CASE : Optional[int] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __SCREAMING_SNAKE_CASE : Tuple = id(self ) __SCREAMING_SNAKE_CASE : Any = self._lock_file __SCREAMING_SNAKE_CASE : Union[str, Any] = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F'''Attempting to acquire lock {lock_id} on {lock_filename}''' ) self._acquire() if self.is_locked: logger().debug(F'''Lock {lock_id} acquired on {lock_filename}''' ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F'''Timeout on acquiring lock {lock_id} on {lock_filename}''' ) raise Timeout(self._lock_file ) else: logger().debug( F'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' ) time.sleep(_A ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __SCREAMING_SNAKE_CASE : Optional[Any] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def UpperCAmelCase__ ( self : int , _A : List[str]=False ): """simple docstring""" with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __SCREAMING_SNAKE_CASE : Optional[int] = id(self ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self._lock_file logger().debug(F'''Attempting to release lock {lock_id} on {lock_filename}''' ) self._release() __SCREAMING_SNAKE_CASE : int = 0 logger().debug(F'''Lock {lock_id} released on {lock_filename}''' ) return None def __enter__( self : int ): """simple docstring""" self.acquire() return self def __exit__( self : Optional[int] , _A : List[str] , _A : List[Any] , _A : int ): """simple docstring""" self.release() return None def __del__( self : int ): """simple docstring""" self.release(force=_A ) return None def UpperCAmelCase__ ( self : Optional[int] , _A : str , _A : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = os.path.basename(_A ) if len(_A ) > max_length and max_length > 0: __SCREAMING_SNAKE_CASE : Tuple = os.path.dirname(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = str(hash(_A ) ) __SCREAMING_SNAKE_CASE : Optional[int] = filename[: max_length - len(_A ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(_A , _A ) else: return path class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[Any] , _A : Optional[Any] , _A : List[Any]=-1 , _A : Dict=None ): """simple docstring""" from .file_utils import relative_to_absolute_path super().__init__(_A , timeout=_A , max_filename_length=_A ) __SCREAMING_SNAKE_CASE : str = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __SCREAMING_SNAKE_CASE : List[str] = os.open(self._lock_file , _A ) except OSError: pass else: try: msvcrt.locking(_A , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(_A ) else: __SCREAMING_SNAKE_CASE : str = fd return None def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self._lock_file_fd __SCREAMING_SNAKE_CASE : int = None msvcrt.locking(_A , msvcrt.LK_UNLCK , 1 ) os.close(_A ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Tuple , _A : Optional[int] , _A : Dict=-1 , _A : str=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = os.statvfs(os.path.dirname(_A ) ).f_namemax super().__init__(_A , timeout=_A , max_filename_length=_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = os.O_RDWR | os.O_CREAT | os.O_TRUNC __SCREAMING_SNAKE_CASE : int = os.open(self._lock_file , _A ) try: fcntl.flock(_A , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(_A ) else: __SCREAMING_SNAKE_CASE : int = fd return None def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self._lock_file_fd __SCREAMING_SNAKE_CASE : Any = None fcntl.flock(_A , fcntl.LOCK_UN ) os.close(_A ) return None class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __SCREAMING_SNAKE_CASE : Optional[Any] = os.open(self._lock_file , _A ) except OSError: pass else: __SCREAMING_SNAKE_CASE : List[str] = fd return None def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" os.close(self._lock_file_fd ) __SCREAMING_SNAKE_CASE : Optional[Any] = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None lowercase_ = None if msvcrt: lowercase_ = WindowsFileLock elif fcntl: lowercase_ = UnixFileLock else: lowercase_ = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
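A minimal usage sketch for the lock classes above (assuming the final platform-dependent alias corresponds to the FileLock name exported in __all__; the file name is illustrative):

lock = FileLock("model.bin.lock", timeout=10)   # WindowsFileLock / UnixFileLock / SoftFileLock per platform
with lock:                                       # acquire() on enter, release() on exit
    pass                                         # work done while holding the lock; nesting is handled by the lock counter

If the lock cannot be acquired within the timeout, acquire() raises the Timeout exception listed in __all__; the default timeout of -1 waits indefinitely.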
74
1
from __future__ import annotations def a__ ( snake_case , snake_case ): """simple docstring""" if nth_term == "": return [""] __SCREAMING_SNAKE_CASE : int = int(snake_case ) __SCREAMING_SNAKE_CASE : Tuple = int(snake_case ) __SCREAMING_SNAKE_CASE : list[str] = [] for temp in range(int(snake_case ) ): series.append(F'''1 / {pow(temp + 1 , int(snake_case ) )}''' if series else '''1''' ) return series if __name__ == "__main__": import doctest doctest.testmod() lowercase_ = int(input("""Enter the last number (nth term) of the P-Series""")) lowercase_ = int(input("""Enter the power for P-Series""")) print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""") print(p_series(nth_term, power))
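A quick worked example of the intended behaviour of the function above (p_series, as it is called in the __main__ block):

print(p_series(5, 2))
# ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']   -> the first five terms of the p-series with p = 2
print(p_series("", 1000))
# ['']                                          -> an empty nth term short-circuits to [""]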
74
import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup lowercase_ = logging.get_logger(__name__) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Optional[Any] , **_A : Dict ): """simple docstring""" requires_backends(self , ['''bs4'''] ) super().__init__(**_A ) def UpperCAmelCase__ ( self : Optional[int] , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = [] __SCREAMING_SNAKE_CASE : Any = [] __SCREAMING_SNAKE_CASE : Union[str, Any] = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag __SCREAMING_SNAKE_CASE : Optional[int] = parent.find_all(child.name , recursive=_A ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(_A ) else next(i for i, s in enumerate(_A , 1 ) if s is child ) ) __SCREAMING_SNAKE_CASE : Any = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def UpperCAmelCase__ ( self : Dict , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BeautifulSoup(_A , '''html.parser''' ) __SCREAMING_SNAKE_CASE : str = [] __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : int = [] for element in html_code.descendants: if type(_A ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue __SCREAMING_SNAKE_CASE : List[Any] = html.unescape(_A ).strip() if not text_in_this_tag: continue all_doc_strings.append(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = self.xpath_soup(_A ) stringaxtag_seq.append(_A ) stringaxsubs_seq.append(_A ) if len(_A ) != len(_A ): raise ValueError('''Number of doc strings and xtags does not correspond''' ) if len(_A ) != len(_A ): raise ValueError('''Number of doc strings and xsubs does not correspond''' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def UpperCAmelCase__ ( self : int , _A : Tuple , _A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = '''''' for tagname, subs in zip(_A , _A ): xpath += F'''/{tagname}''' if subs != 0: xpath += F'''[{subs}]''' return xpath def __call__( self : Optional[int] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = False # Check that strings has a valid type if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Any = True elif isinstance(_A , (list, tuple) ): if len(_A ) == 0 or isinstance(html_strings[0] , _A ): __SCREAMING_SNAKE_CASE : List[Any] = True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' F'''but is of type {type(_A )}.''' ) __SCREAMING_SNAKE_CASE : Any = bool(isinstance(_A , (list, tuple) ) and (isinstance(html_strings[0] , _A )) ) if not is_batched: __SCREAMING_SNAKE_CASE : Dict = [html_strings] # Get nodes + xpaths __SCREAMING_SNAKE_CASE : str = [] __SCREAMING_SNAKE_CASE : Tuple = [] for html_string in html_strings: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_three_from_single(_A ) nodes.append(_A ) __SCREAMING_SNAKE_CASE : Dict = [] for node, tag_list, sub_list in zip(_A , _A , _A ): __SCREAMING_SNAKE_CASE : List[Any] = self.construct_xpath(_A , _A ) xpath_strings.append(_A ) xpaths.append(_A ) # return as Dict __SCREAMING_SNAKE_CASE : Optional[int] = {'''nodes''': nodes, '''xpaths''': xpaths} 
__SCREAMING_SNAKE_CASE : List[str] = BatchFeature(data=_A , tensor_type=_A ) return encoded_inputs
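A minimal sketch of what the feature extractor above produces (the MarkupLMFeatureExtractor name is an assumption; any instance of the class above should behave the same, and bs4 must be installed):

extractor = MarkupLMFeatureExtractor()
encoding = extractor("<html><body><p>Hello world</p></body></html>")
print(encoding["nodes"])    # [['Hello world']]    -> one list of text nodes per HTML string
print(encoding["xpaths"])   # [['/html/body/p']]   -> the xpath of the tag enclosing each node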
74
1
from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = 42 class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" @register_to_config def __init__( self : Dict , _A : int = 16 , _A : int = 88 , _A : Optional[int] = None , _A : Optional[int] = None , _A : int = 1 , _A : float = 0.0 , _A : int = 32 , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : str = "geglu" , _A : bool = True , _A : bool = True , ): """simple docstring""" super().__init__() __SCREAMING_SNAKE_CASE : Dict = num_attention_heads __SCREAMING_SNAKE_CASE : Optional[int] = attention_head_dim __SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads * attention_head_dim __SCREAMING_SNAKE_CASE : Tuple = in_channels __SCREAMING_SNAKE_CASE : str = torch.nn.GroupNorm(num_groups=_A , num_channels=_A , eps=1e-6 , affine=_A ) __SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(_A , _A ) # 3. Define transformers blocks __SCREAMING_SNAKE_CASE : List[Any] = nn.ModuleList( [ BasicTransformerBlock( _A , _A , _A , dropout=_A , cross_attention_dim=_A , activation_fn=_A , attention_bias=_A , double_self_attention=_A , norm_elementwise_affine=_A , ) for d in range(_A ) ] ) __SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(_A , _A ) def UpperCAmelCase__ ( self : str , _A : Dict , _A : int=None , _A : Tuple=None , _A : Dict=None , _A : List[Any]=1 , _A : Union[str, Any]=None , _A : bool = True , ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = hidden_states.shape __SCREAMING_SNAKE_CASE : Any = batch_frames // num_frames __SCREAMING_SNAKE_CASE : Dict = hidden_states __SCREAMING_SNAKE_CASE : str = hidden_states[None, :].reshape(_A , _A , _A , _A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = hidden_states.permute(0 , 2 , 1 , 3 , 4 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self.norm(_A ) __SCREAMING_SNAKE_CASE : List[str] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , _A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = self.proj_in(_A ) # 2. Blocks for block in self.transformer_blocks: __SCREAMING_SNAKE_CASE : Optional[Any] = block( _A , encoder_hidden_states=_A , timestep=_A , cross_attention_kwargs=_A , class_labels=_A , ) # 3. Output __SCREAMING_SNAKE_CASE : Any = self.proj_out(_A ) __SCREAMING_SNAKE_CASE : List[str] = ( hidden_states[None, None, :] .reshape(_A , _A , _A , _A , _A ) .permute(0 , 3 , 4 , 1 , 2 ) .contiguous() ) __SCREAMING_SNAKE_CASE : Optional[Any] = hidden_states.reshape(_A , _A , _A , _A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=_A )
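The forward pass above interleaves several reshapes; as a sketch (shapes inferred from the code, names illustrative, where the integer argument with default 1 is the number of frames per clip):

# input:  hidden_states of shape (batch * frames, C, H, W); a residual copy is kept
# 1. reshape to (batch, frames, C, H, W), permute to (batch, C, frames, H, W) for the GroupNorm
# 2. permute/reshape to (batch * H * W, frames, C): every spatial location becomes its own
#    length-frames sequence, so the transformer blocks attend across time only
# 3. proj_in maps C to num_attention_heads * attention_head_dim, the blocks run, proj_out maps back to C
# 4. reshape back to (batch * frames, C, H, W) and add the residual saved at the start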
74
import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger() def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case = True ): """simple docstring""" print(F'''Converting {name}...''' ) with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": __SCREAMING_SNAKE_CASE : Tuple = timm.create_model('''levit_128s''' , pretrained=snake_case ) else: __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_128''' , pretrained=snake_case ) if hidden_sizes == 192: __SCREAMING_SNAKE_CASE : Dict = timm.create_model('''levit_192''' , pretrained=snake_case ) if hidden_sizes == 256: __SCREAMING_SNAKE_CASE : Optional[int] = timm.create_model('''levit_256''' , pretrained=snake_case ) if hidden_sizes == 384: __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_384''' , pretrained=snake_case ) from_model.eval() __SCREAMING_SNAKE_CASE : str = LevitForImageClassificationWithTeacher(snake_case ).eval() __SCREAMING_SNAKE_CASE : int = OrderedDict() __SCREAMING_SNAKE_CASE : List[Any] = from_model.state_dict() __SCREAMING_SNAKE_CASE : Tuple = list(from_model.state_dict().keys() ) __SCREAMING_SNAKE_CASE : str = list(our_model.state_dict().keys() ) print(len(snake_case ) , len(snake_case ) ) for i in range(len(snake_case ) ): __SCREAMING_SNAKE_CASE : int = weights[og_keys[i]] our_model.load_state_dict(snake_case ) __SCREAMING_SNAKE_CASE : str = torch.randn((2, 3, 224, 224) ) __SCREAMING_SNAKE_CASE : Tuple = from_model(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = our_model(snake_case ).logits assert torch.allclose(snake_case , snake_case ), "The model logits don't match the original one." 
__SCREAMING_SNAKE_CASE : Union[str, Any] = name print(snake_case ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) __SCREAMING_SNAKE_CASE : Union[str, Any] = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F'''Pushed {checkpoint_name}''' ) def a__ ( snake_case , snake_case = None , snake_case = True ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = '''imagenet-1k-id2label.json''' __SCREAMING_SNAKE_CASE : int = 1_000 __SCREAMING_SNAKE_CASE : Optional[int] = (1, num_labels) __SCREAMING_SNAKE_CASE : Any = '''huggingface/label-files''' __SCREAMING_SNAKE_CASE : Optional[Any] = num_labels __SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='''dataset''' ) , '''r''' ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {int(snake_case ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : str = idalabel __SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : List[str] = partial(snake_case , num_labels=snake_case , idalabel=snake_case , labelaid=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = { '''levit-128S''': 128, '''levit-128''': 128, '''levit-192''': 192, '''levit-256''': 256, '''levit-384''': 384, } __SCREAMING_SNAKE_CASE : Optional[int] = { '''levit-128S''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-128''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-192''': ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-256''': ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-384''': ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , snake_case , names_to_config[model_name] , snake_case , snake_case ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , snake_case , snake_case , snake_case , snake_case ) return config, expected_shape if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""levit-dump-folder/""", type=Path, required=False, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) lowercase_ = parser.parse_args() lowercase_ = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
74
1
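A minimal sketch of the positional state-dict remapping the checkpoint-conversion script above relies on: weights are copied purely in enumeration order, so it only works when the source and target models register their parameters in the same order. The layer names and shapes below are invented for illustration.

from collections import OrderedDict

import torch

# Source checkpoint (e.g. a timm model) and the key names expected by the target model.
source_state = OrderedDict(
    [("stem.conv.weight", torch.randn(8, 3, 3, 3)), ("head.weight", torch.randn(10, 8))]
)
target_keys = ["embeddings.projection.weight", "classifier.weight"]

# Copy tensors over in order; no name matching is attempted.
remapped = OrderedDict(
    (new_key, tensor) for new_key, tensor in zip(target_keys, source_state.values())
)
print(list(remapped.keys()))  # ['embeddings.projection.weight', 'classifier.weight']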
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowercase_ = { """configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""", """FalconForCausalLM""", """FalconModel""", """FalconPreTrainedModel""", """FalconForSequenceClassification""", """FalconForTokenClassification""", """FalconForQuestionAnswering""", ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowercase_ = { """configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""", """FalconForCausalLM""", """FalconModel""", """FalconPreTrainedModel""", """FalconForSequenceClassification""", """FalconForTokenClassification""", """FalconForQuestionAnswering""", ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
1
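Both fields of the record above are the same lazy-import scaffolding. A stripped-down sketch of the idea is shown below; it is not the actual _LazyModule implementation, just the PEP 562 module-level __getattr__ pattern it builds on, so a heavy submodule is only imported on first attribute access. It is meant to live in a package __init__.py; "json" stands in for a heavy submodule so the sketch runs on its own.

import importlib

_import_structure = {"json": ["loads", "dumps"]}

def __getattr__(name):
    # Resolve exported names lazily: import the owning module only when it is asked for.
    for module_name, exported_names in _import_structure.items():
        if name in exported_names:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")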
from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """huggingface/informer-tourism-monthly""": ( """https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json""" ), # See all Informer models at https://huggingface.co/models?filter=informer } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''informer''' lowerCAmelCase_ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self : Tuple , _A : Optional[int] = None , _A : Optional[int] = None , _A : str = "student_t" , _A : str = "nll" , _A : int = 1 , _A : List[int] = None , _A : Optional[Union[str, bool]] = "mean" , _A : int = 0 , _A : int = 0 , _A : int = 0 , _A : int = 0 , _A : Optional[List[int]] = None , _A : Optional[List[int]] = None , _A : int = 64 , _A : int = 32 , _A : int = 32 , _A : int = 2 , _A : int = 2 , _A : int = 2 , _A : int = 2 , _A : bool = True , _A : str = "gelu" , _A : float = 0.05 , _A : float = 0.1 , _A : float = 0.1 , _A : float = 0.1 , _A : float = 0.1 , _A : int = 100 , _A : float = 0.02 , _A : Union[str, Any]=True , _A : str = "prob" , _A : int = 5 , _A : bool = True , **_A : int , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = prediction_length __SCREAMING_SNAKE_CASE : Union[str, Any] = context_length or prediction_length __SCREAMING_SNAKE_CASE : Optional[int] = distribution_output __SCREAMING_SNAKE_CASE : Optional[int] = loss __SCREAMING_SNAKE_CASE : Union[str, Any] = input_size __SCREAMING_SNAKE_CASE : Any = num_time_features __SCREAMING_SNAKE_CASE : Optional[Any] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] __SCREAMING_SNAKE_CASE : Tuple = scaling __SCREAMING_SNAKE_CASE : List[Any] = num_dynamic_real_features __SCREAMING_SNAKE_CASE : str = num_static_real_features __SCREAMING_SNAKE_CASE : Union[str, Any] = num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if len(_A ) != num_static_categorical_features: raise ValueError( '''The cardinality should be a list of the same length as `num_static_categorical_features`''' ) __SCREAMING_SNAKE_CASE : List[Any] = cardinality else: __SCREAMING_SNAKE_CASE : int = [0] # set embedding_dimension if embedding_dimension and num_static_categorical_features > 0: if len(_A ) != num_static_categorical_features: raise ValueError( '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' ) __SCREAMING_SNAKE_CASE : Any = embedding_dimension else: __SCREAMING_SNAKE_CASE : Optional[int] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] __SCREAMING_SNAKE_CASE : str = num_parallel_samples # Transformer architecture configuration __SCREAMING_SNAKE_CASE : List[Any] = input_size * len(self.lags_sequence ) + self._number_of_features __SCREAMING_SNAKE_CASE : Optional[int] = d_model __SCREAMING_SNAKE_CASE : Any = encoder_attention_heads __SCREAMING_SNAKE_CASE : List[Any] = decoder_attention_heads __SCREAMING_SNAKE_CASE : Optional[Any] = encoder_ffn_dim __SCREAMING_SNAKE_CASE : List[Any] = decoder_ffn_dim __SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_layers __SCREAMING_SNAKE_CASE : List[str] = decoder_layers __SCREAMING_SNAKE_CASE : List[str] = dropout __SCREAMING_SNAKE_CASE : Union[str, Any] = attention_dropout __SCREAMING_SNAKE_CASE : Any = 
activation_dropout __SCREAMING_SNAKE_CASE : int = encoder_layerdrop __SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_layerdrop __SCREAMING_SNAKE_CASE : Optional[Any] = activation_function __SCREAMING_SNAKE_CASE : Optional[Any] = init_std __SCREAMING_SNAKE_CASE : List[str] = use_cache # Informer __SCREAMING_SNAKE_CASE : Optional[int] = attention_type __SCREAMING_SNAKE_CASE : int = sampling_factor __SCREAMING_SNAKE_CASE : Optional[Any] = distil super().__init__(is_encoder_decoder=_A , **_A ) @property def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
74
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging lowercase_ = logging.get_logger(__name__) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = set() __SCREAMING_SNAKE_CASE : str = [] def parse_line(snake_case ): for line in fp: if isinstance(snake_case , snake_case ): __SCREAMING_SNAKE_CASE : List[Any] = line.decode('''UTF-8''' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(''' ''' ): # process a single warning and move it to `selected_warnings`. if len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : List[Any] = '''\n'''.join(snake_case ) # Only keep the warnings specified in `targets` if any(F''': {x}: ''' in warning for x in targets ): selected_warnings.add(snake_case ) buffer.clear() continue else: __SCREAMING_SNAKE_CASE : int = line.strip() buffer.append(snake_case ) if from_gh: for filename in os.listdir(snake_case ): __SCREAMING_SNAKE_CASE : Any = os.path.join(snake_case , snake_case ) if not os.path.isdir(snake_case ): # read the file if filename != "warnings.txt": continue with open(snake_case ) as fp: parse_line(snake_case ) else: try: with zipfile.ZipFile(snake_case ) as z: for filename in z.namelist(): if not os.path.isdir(snake_case ): # read the file if filename != "warnings.txt": continue with z.open(snake_case ) as fp: parse_line(snake_case ) except Exception: logger.warning( F'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' ) return selected_warnings def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = set() __SCREAMING_SNAKE_CASE : List[Any] = [os.path.join(snake_case , snake_case ) for p in os.listdir(snake_case ) if (p.endswith('''.zip''' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(snake_case , snake_case ) ) return selected_warnings if __name__ == "__main__": def a__ ( snake_case ): """simple docstring""" return values.split(''',''' ) lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") # optional parameters parser.add_argument( """--targets""", default="""DeprecationWarning,UserWarning,FutureWarning""", type=list_str, help="""Comma-separated list of target warning(s) which we want to extract.""", ) parser.add_argument( """--from_gh""", action="""store_true""", help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""", ) lowercase_ = parser.parse_args() lowercase_ = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links lowercase_ = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("""=""" * 
80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts lowercase_ = extract_warnings(args.output_dir, args.targets) lowercase_ = sorted(selected_warnings) with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
74
1
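A hedged sketch of the artifact-scanning loop in the script just above, assuming each downloaded CI artifact is a zip archive containing a "warnings.txt" member; the member name and target list are illustrative.

import zipfile

def warnings_in_artifact(artifact_path: str, targets=("DeprecationWarning", "FutureWarning")):
    """Collect warning lines from a CI artifact zip that mention one of the target categories."""
    found = set()
    with zipfile.ZipFile(artifact_path) as archive:
        if "warnings.txt" not in archive.namelist():
            return found
        with archive.open("warnings.txt") as fp:
            for raw_line in fp:  # zip members are read in binary mode
                line = raw_line.decode("utf-8")
                if any(f": {target}: " in line for target in targets):
                    found.add(line.strip())
    return found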
import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def a__ ( snake_case ): """simple docstring""" assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def a__ ( ): """simple docstring""" assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = '''mock-s3-bucket''' __SCREAMING_SNAKE_CASE : int = F'''s3://{mock_bucket}''' __SCREAMING_SNAKE_CASE : Dict = extract_path_from_uri(snake_case ) assert dataset_path.startswith('''s3://''' ) is False __SCREAMING_SNAKE_CASE : List[str] = '''./local/path''' __SCREAMING_SNAKE_CASE : str = extract_path_from_uri(snake_case ) assert dataset_path == new_dataset_path def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = is_remote_filesystem(snake_case ) assert is_remote is True __SCREAMING_SNAKE_CASE : int = fsspec.filesystem('''file''' ) __SCREAMING_SNAKE_CASE : str = is_remote_filesystem(snake_case ) assert is_remote is False @pytest.mark.parametrize('''compression_fs_class''' , snake_case ) def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file} __SCREAMING_SNAKE_CASE : List[str] = input_paths[compression_fs_class.protocol] if input_path is None: __SCREAMING_SNAKE_CASE : Any = F'''for \'{compression_fs_class.protocol}\' compression protocol, ''' if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case ) __SCREAMING_SNAKE_CASE : str = fsspec.filesystem(compression_fs_class.protocol , fo=snake_case ) assert isinstance(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.basename(snake_case ) __SCREAMING_SNAKE_CASE : Optional[int] = expected_filename[: expected_filename.rindex('''.''' )] assert fs.glob('''*''' ) == [expected_filename] with fs.open(snake_case , '''r''' , encoding='''utf-8''' ) as f, open(snake_case , encoding='''utf-8''' ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] ) def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path} __SCREAMING_SNAKE_CASE : Dict = compressed_file_paths[protocol] __SCREAMING_SNAKE_CASE : int = '''dataset.jsonl''' __SCREAMING_SNAKE_CASE : int = F'''{protocol}://{member_file_path}::{compressed_file_path}''' __SCREAMING_SNAKE_CASE, *__SCREAMING_SNAKE_CASE : Dict = fsspec.get_fs_token_paths(snake_case ) assert fs.isfile(snake_case ) assert not fs.isfile('''non_existing_''' + member_file_path ) @pytest.mark.integration def a__ ( snake_case , snake_case , snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = hf_api.dataset_info(snake_case , token=snake_case ) __SCREAMING_SNAKE_CASE : Optional[Any] = HfFileSystem(repo_info=snake_case , token=snake_case ) assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"] assert hffs.isdir('''data''' ) assert 
hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' ) with open(snake_case ) as f: assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read() def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = '''bz2''' # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(snake_case , snake_case , clobber=snake_case ) with pytest.warns(snake_case ) as warning_info: importlib.reload(datasets.filesystems ) assert len(snake_case ) == 1 assert ( str(warning_info[0].message ) == F'''A filesystem protocol was already set for {protocol} and will be overwritten.''' )
74
from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = 42 class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" @register_to_config def __init__( self : Dict , _A : int = 16 , _A : int = 88 , _A : Optional[int] = None , _A : Optional[int] = None , _A : int = 1 , _A : float = 0.0 , _A : int = 32 , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : str = "geglu" , _A : bool = True , _A : bool = True , ): """simple docstring""" super().__init__() __SCREAMING_SNAKE_CASE : Dict = num_attention_heads __SCREAMING_SNAKE_CASE : Optional[int] = attention_head_dim __SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads * attention_head_dim __SCREAMING_SNAKE_CASE : Tuple = in_channels __SCREAMING_SNAKE_CASE : str = torch.nn.GroupNorm(num_groups=_A , num_channels=_A , eps=1e-6 , affine=_A ) __SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(_A , _A ) # 3. Define transformers blocks __SCREAMING_SNAKE_CASE : List[Any] = nn.ModuleList( [ BasicTransformerBlock( _A , _A , _A , dropout=_A , cross_attention_dim=_A , activation_fn=_A , attention_bias=_A , double_self_attention=_A , norm_elementwise_affine=_A , ) for d in range(_A ) ] ) __SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(_A , _A ) def UpperCAmelCase__ ( self : str , _A : Dict , _A : int=None , _A : Tuple=None , _A : Dict=None , _A : List[Any]=1 , _A : Union[str, Any]=None , _A : bool = True , ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = hidden_states.shape __SCREAMING_SNAKE_CASE : Any = batch_frames // num_frames __SCREAMING_SNAKE_CASE : Dict = hidden_states __SCREAMING_SNAKE_CASE : str = hidden_states[None, :].reshape(_A , _A , _A , _A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = hidden_states.permute(0 , 2 , 1 , 3 , 4 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self.norm(_A ) __SCREAMING_SNAKE_CASE : List[str] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , _A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = self.proj_in(_A ) # 2. Blocks for block in self.transformer_blocks: __SCREAMING_SNAKE_CASE : Optional[Any] = block( _A , encoder_hidden_states=_A , timestep=_A , cross_attention_kwargs=_A , class_labels=_A , ) # 3. Output __SCREAMING_SNAKE_CASE : Any = self.proj_out(_A ) __SCREAMING_SNAKE_CASE : List[str] = ( hidden_states[None, None, :] .reshape(_A , _A , _A , _A , _A ) .permute(0 , 3 , 4 , 1 , 2 ) .contiguous() ) __SCREAMING_SNAKE_CASE : Optional[Any] = hidden_states.reshape(_A , _A , _A , _A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=_A )
74
1
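The temporal transformer block captured in this record's style context is flattened onto one line; its core is a reshape that turns a stack of per-frame feature maps into one token sequence per spatial location, so self-attention runs along the frame axis. A tiny sketch with made-up shapes:

import torch

batch_size, num_frames, channels, height, width = 2, 4, 8, 16, 16
hidden_states = torch.randn(batch_size * num_frames, channels, height, width)

# Recover the (batch, frames) grouping, then fold the spatial grid into the batch axis
# so every pixel position becomes a length-`num_frames` sequence of channel vectors.
x = hidden_states.reshape(batch_size, num_frames, channels, height, width)
x = x.permute(0, 2, 1, 3, 4)  # (batch, channels, frames, h, w)
tokens = x.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channels)
print(tokens.shape)  # torch.Size([512, 4, 8]); attention mixes the 4 frames per location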
import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast lowercase_ = datasets.utils.logging.get_logger(__name__) @dataclass class __UpperCamelCase ( datasets.BuilderConfig ): """simple docstring""" lowerCAmelCase_ = 1_00_00 lowerCAmelCase_ = None lowerCAmelCase_ = None class __UpperCamelCase ( datasets.ArrowBasedBuilder ): """simple docstring""" lowerCAmelCase_ = ParquetConfig def UpperCAmelCase__ ( self : Any ): """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def UpperCAmelCase__ ( self : Any , _A : Optional[Any] ): """simple docstring""" if not self.config.data_files: raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) __SCREAMING_SNAKE_CASE : List[str] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_A , (str, list, tuple) ): __SCREAMING_SNAKE_CASE : Tuple = data_files if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Optional[int] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE : List[Any] = [dl_manager.iter_files(_A ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] __SCREAMING_SNAKE_CASE : int = [] for split_name, files in data_files.items(): if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Any = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE : Optional[int] = [dl_manager.iter_files(_A ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(_A ): with open(_A , '''rb''' ) as f: __SCREAMING_SNAKE_CASE : Dict = datasets.Features.from_arrow_schema(pq.read_schema(_A ) ) break splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'''files''': files} ) ) return splits def UpperCAmelCase__ ( self : str , _A : pa.Table ): """simple docstring""" if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example __SCREAMING_SNAKE_CASE : str = table_cast(_A , self.info.features.arrow_schema ) return pa_table def UpperCAmelCase__ ( self : Tuple , _A : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' ) for file_idx, file in enumerate(itertools.chain.from_iterable(_A ) ): with open(_A , '''rb''' ) as f: __SCREAMING_SNAKE_CASE : str = pq.ParquetFile(_A ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): __SCREAMING_SNAKE_CASE : Optional[Any] = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield F'''{file_idx}_{batch_idx}''', self._cast_table(_A ) except ValueError as 
e: logger.error(F'''Failed to read file \'{file}\' with error {type(_A )}: {e}''' ) raise
74
import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py lowercase_ = """src/diffusers""" lowercase_ = """.""" # This is to make sure the diffusers module imported is the one in the repo. lowercase_ = importlib.util.spec_from_file_location( """diffusers""", os.path.join(DIFFUSERS_PATH, """__init__.py"""), submodule_search_locations=[DIFFUSERS_PATH], ) lowercase_ = spec.loader.load_module() def a__ ( snake_case , snake_case ): """simple docstring""" return line.startswith(snake_case ) or len(snake_case ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , snake_case ) is not None def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = object_name.split('''.''' ) __SCREAMING_SNAKE_CASE : str = 0 # First let's find the module where our object lives. __SCREAMING_SNAKE_CASE : Any = parts[i] while i < len(snake_case ) and not os.path.isfile(os.path.join(snake_case , F'''{module}.py''' ) ): i += 1 if i < len(snake_case ): __SCREAMING_SNAKE_CASE : str = os.path.join(snake_case , parts[i] ) if i >= len(snake_case ): raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' ) with open(os.path.join(snake_case , F'''{module}.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : Dict = f.readlines() # Now let's find the class / func in the code! __SCREAMING_SNAKE_CASE : Union[str, Any] = '''''' __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for name in parts[i + 1 :]: while ( line_index < len(snake_case ) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(snake_case ): raise ValueError(F''' {object_name} does not match any function or class in {module}.''' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). __SCREAMING_SNAKE_CASE : List[Any] = line_index while line_index < len(snake_case ) and _should_continue(lines[line_index] , snake_case ): line_index += 1 # Clean up empty lines at the end (if any). 
while len(lines[line_index - 1] ) <= 1: line_index -= 1 __SCREAMING_SNAKE_CASE : Dict = lines[start_index:line_index] return "".join(snake_case ) lowercase_ = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""") lowercase_ = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""") lowercase_ = re.compile(R"""<FILL\s+[^>]*>""") def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = code.split('''\n''' ) __SCREAMING_SNAKE_CASE : Dict = 0 while idx < len(snake_case ) and len(lines[idx] ) == 0: idx += 1 if idx < len(snake_case ): return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0] return "" def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = len(get_indent(snake_case ) ) > 0 if has_indent: __SCREAMING_SNAKE_CASE : List[Any] = F'''class Bla:\n{code}''' __SCREAMING_SNAKE_CASE : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=snake_case ) __SCREAMING_SNAKE_CASE : Optional[int] = black.format_str(snake_case , mode=snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = style_docstrings_in_code(snake_case ) return result[len('''class Bla:\n''' ) :] if has_indent else result def a__ ( snake_case , snake_case=False ): """simple docstring""" with open(snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : List[str] = f.readlines() __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : int = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(snake_case ): __SCREAMING_SNAKE_CASE : Dict = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = search.groups() __SCREAMING_SNAKE_CASE : int = find_code_in_diffusers(snake_case ) __SCREAMING_SNAKE_CASE : str = get_indent(snake_case ) __SCREAMING_SNAKE_CASE : Any = line_index + 1 if indent == theoretical_indent else line_index + 2 __SCREAMING_SNAKE_CASE : Dict = theoretical_indent __SCREAMING_SNAKE_CASE : Optional[int] = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. __SCREAMING_SNAKE_CASE : List[Any] = True while line_index < len(snake_case ) and should_continue: line_index += 1 if line_index >= len(snake_case ): break __SCREAMING_SNAKE_CASE : Any = lines[line_index] __SCREAMING_SNAKE_CASE : Optional[Any] = _should_continue(snake_case , snake_case ) and re.search(F'''^{indent}# End copy''' , snake_case ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 __SCREAMING_SNAKE_CASE : List[str] = lines[start_index:line_index] __SCREAMING_SNAKE_CASE : Dict = ''''''.join(snake_case ) # Remove any nested `Copied from` comments to avoid circular copies __SCREAMING_SNAKE_CASE : Tuple = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(snake_case ) is None] __SCREAMING_SNAKE_CASE : Union[str, Any] = '''\n'''.join(snake_case ) # Before comparing, use the `replace_pattern` on the original code. 
if len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : Union[str, Any] = replace_pattern.replace('''with''' , '''''' ).split(''',''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = [_re_replace_pattern.search(snake_case ) for p in patterns] for pattern in patterns: if pattern is None: continue __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = pattern.groups() __SCREAMING_SNAKE_CASE : str = re.sub(snake_case , snake_case , snake_case ) if option.strip() == "all-casing": __SCREAMING_SNAKE_CASE : Optional[Any] = re.sub(obja.lower() , obja.lower() , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = re.sub(obja.upper() , obja.upper() , snake_case ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line __SCREAMING_SNAKE_CASE : Optional[Any] = blackify(lines[start_index - 1] + theoretical_code ) __SCREAMING_SNAKE_CASE : int = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: __SCREAMING_SNAKE_CASE : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:] __SCREAMING_SNAKE_CASE : str = start_index + 1 if overwrite and len(snake_case ) > 0: # Warn the user a file has been modified. print(F'''Detected changes, rewriting {filename}.''' ) with open(snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(snake_case ) return diffs def a__ ( snake_case = False ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = glob.glob(os.path.join(snake_case , '''**/*.py''' ) , recursive=snake_case ) __SCREAMING_SNAKE_CASE : Tuple = [] for filename in all_files: __SCREAMING_SNAKE_CASE : int = is_copy_consistent(snake_case , snake_case ) diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs] if not overwrite and len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : Optional[int] = '''\n'''.join(snake_case ) raise Exception( '''Found the following copy inconsistencies:\n''' + diff + '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") lowercase_ = parser.parse_args() check_copies(args.fix_and_overwrite)
74
1
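The copy-consistency checker just above is driven by a "# Copied from" comment convention; a small sketch of how that marker is parsed (the regex mirrors the record, the sample line is made up):

import re

_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")

sample = "    # Copied from diffusers.models.attention.BasicTransformerBlock with Attn->CrossAttn"
match = _re_copy_warning.search(sample)
if match:
    indent, source_object, replace_pattern = match.groups()
    print(source_object)    # models.attention.BasicTransformerBlock
    print(replace_pattern)  # with Attn->CrossAttn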
from __future__ import annotations import math import random from typing import Any class __UpperCamelCase : """simple docstring""" def __init__( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : list[Any] = [] __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : int = 0 def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" return self.head == self.tail def UpperCAmelCase__ ( self : Tuple , _A : Any ): """simple docstring""" self.data.append(_A ) __SCREAMING_SNAKE_CASE : Dict = self.tail + 1 def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.data[self.head] __SCREAMING_SNAKE_CASE : Dict = self.head + 1 return ret def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" return self.tail - self.head def UpperCAmelCase__ ( self : Any ): """simple docstring""" print(self.data ) print('''**************''' ) print(self.data[self.head : self.tail] ) class __UpperCamelCase : """simple docstring""" def __init__( self : Any , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = data __SCREAMING_SNAKE_CASE : MyNode | None = None __SCREAMING_SNAKE_CASE : MyNode | None = None __SCREAMING_SNAKE_CASE : int = 1 def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" return self.data def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" return self.left def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" return self.right def UpperCAmelCase__ ( self : str ): """simple docstring""" return self.height def UpperCAmelCase__ ( self : str , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = data def UpperCAmelCase__ ( self : Union[str, Any] , _A : MyNode | None ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = node def UpperCAmelCase__ ( self : List[str] , _A : MyNode | None ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = node def UpperCAmelCase__ ( self : Optional[Any] , _A : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = height def a__ ( snake_case ): """simple docstring""" if node is None: return 0 return node.get_height() def a__ ( snake_case , snake_case ): """simple docstring""" if a > b: return a return b def a__ ( snake_case ): """simple docstring""" print('''left rotation node:''' , node.get_data() ) __SCREAMING_SNAKE_CASE : int = node.get_left() assert ret is not None node.set_left(ret.get_right() ) ret.set_right(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(snake_case ) __SCREAMING_SNAKE_CASE : Optional[Any] = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(snake_case ) return ret def a__ ( snake_case ): """simple docstring""" print('''right rotation node:''' , node.get_data() ) __SCREAMING_SNAKE_CASE : List[Any] = node.get_right() assert ret is not None node.set_right(ret.get_left() ) ret.set_left(snake_case ) __SCREAMING_SNAKE_CASE : List[Any] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(snake_case ) __SCREAMING_SNAKE_CASE : Optional[int] = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(snake_case ) return ret def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = node.get_left() assert left_child is not None node.set_left(left_rotation(snake_case ) ) return right_rotation(snake_case ) def a__ ( snake_case ): """simple docstring""" 
__SCREAMING_SNAKE_CASE : str = node.get_right() assert right_child is not None node.set_right(right_rotation(snake_case ) ) return left_rotation(snake_case ) def a__ ( snake_case , snake_case ): """simple docstring""" if node is None: return MyNode(snake_case ) if data < node.get_data(): node.set_left(insert_node(node.get_left() , snake_case ) ) if ( get_height(node.get_left() ) - get_height(node.get_right() ) == 2 ): # an unbalance detected __SCREAMING_SNAKE_CASE : Optional[int] = node.get_left() assert left_child is not None if ( data < left_child.get_data() ): # new node is the left child of the left child __SCREAMING_SNAKE_CASE : Optional[Any] = right_rotation(snake_case ) else: __SCREAMING_SNAKE_CASE : List[str] = lr_rotation(snake_case ) else: node.set_right(insert_node(node.get_right() , snake_case ) ) if get_height(node.get_right() ) - get_height(node.get_left() ) == 2: __SCREAMING_SNAKE_CASE : int = node.get_right() assert right_child is not None if data < right_child.get_data(): __SCREAMING_SNAKE_CASE : Any = rl_rotation(snake_case ) else: __SCREAMING_SNAKE_CASE : Optional[Any] = left_rotation(snake_case ) __SCREAMING_SNAKE_CASE : Optional[Any] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(snake_case ) return node def a__ ( snake_case ): """simple docstring""" while True: __SCREAMING_SNAKE_CASE : int = root.get_right() if right_child is None: break __SCREAMING_SNAKE_CASE : Union[str, Any] = right_child return root.get_data() def a__ ( snake_case ): """simple docstring""" while True: __SCREAMING_SNAKE_CASE : List[str] = root.get_left() if left_child is None: break __SCREAMING_SNAKE_CASE : str = left_child return root.get_data() def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = root.get_left() __SCREAMING_SNAKE_CASE : List[Any] = root.get_right() if root.get_data() == data: if left_child is not None and right_child is not None: __SCREAMING_SNAKE_CASE : List[str] = get_left_most(snake_case ) root.set_data(snake_case ) root.set_right(del_node(snake_case , snake_case ) ) elif left_child is not None: __SCREAMING_SNAKE_CASE : List[Any] = left_child elif right_child is not None: __SCREAMING_SNAKE_CASE : Dict = right_child else: return None elif root.get_data() > data: if left_child is None: print('''No such data''' ) return root else: root.set_left(del_node(snake_case , snake_case ) ) else: # root.get_data() < data if right_child is None: return root else: root.set_right(del_node(snake_case , snake_case ) ) if get_height(snake_case ) - get_height(snake_case ) == 2: assert right_child is not None if get_height(right_child.get_right() ) > get_height(right_child.get_left() ): __SCREAMING_SNAKE_CASE : Dict = left_rotation(snake_case ) else: __SCREAMING_SNAKE_CASE : Any = rl_rotation(snake_case ) elif get_height(snake_case ) - get_height(snake_case ) == -2: assert left_child is not None if get_height(left_child.get_left() ) > get_height(left_child.get_right() ): __SCREAMING_SNAKE_CASE : Optional[Any] = right_rotation(snake_case ) else: __SCREAMING_SNAKE_CASE : Any = lr_rotation(snake_case ) __SCREAMING_SNAKE_CASE : Any = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1 root.set_height(snake_case ) return root class __UpperCamelCase : """simple docstring""" def __init__( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : MyNode | None = None def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" return get_height(self.root ) def UpperCAmelCase__ ( self : 
str , _A : Any ): """simple docstring""" print('''insert:''' + str(_A ) ) __SCREAMING_SNAKE_CASE : Dict = insert_node(self.root , _A ) def UpperCAmelCase__ ( self : Tuple , _A : Any ): """simple docstring""" print('''delete:''' + str(_A ) ) if self.root is None: print('''Tree is empty!''' ) return __SCREAMING_SNAKE_CASE : int = del_node(self.root , _A ) def __str__( self : int , ): # a level traversale, gives a more intuitive look on the tree """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = '''''' __SCREAMING_SNAKE_CASE : Optional[int] = MyQueue() q.push(self.root ) __SCREAMING_SNAKE_CASE : Any = self.get_height() if layer == 0: return output __SCREAMING_SNAKE_CASE : Optional[int] = 0 while not q.is_empty(): __SCREAMING_SNAKE_CASE : str = q.pop() __SCREAMING_SNAKE_CASE : Union[str, Any] = ''' ''' * int(math.pow(2 , layer - 1 ) ) output += space if node is None: output += "*" q.push(_A ) q.push(_A ) else: output += str(node.get_data() ) q.push(node.get_left() ) q.push(node.get_right() ) output += space __SCREAMING_SNAKE_CASE : Optional[int] = cnt + 1 for i in range(100 ): if cnt == math.pow(2 , _A ) - 1: __SCREAMING_SNAKE_CASE : Union[str, Any] = layer - 1 if layer == 0: output += "\n*************************************" return output output += "\n" break output += "\n*************************************" return output def a__ ( ): """simple docstring""" import doctest doctest.testmod() if __name__ == "__main__": _test() lowercase_ = AVLtree() lowercase_ = list(range(10)) random.shuffle(lst) for i in lst: t.insert(i) print(str(t)) random.shuffle(lst) for i in lst: t.del_node(i) print(str(t))
74
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" super().tearDown() gc.collect() def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained( '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE : int = jax.device_count() __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt] __SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE : Tuple = replicate(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = shard(_A ) __SCREAMING_SNAKE_CASE : Dict = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE : Optional[int] = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE : str = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE : Tuple = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = '''stabilityai/stable-diffusion-2''' __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = FlaxDPMSolverMultistepScheduler.from_pretrained(_A , subfolder='''scheduler''' ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = FlaxStableDiffusionPipeline.from_pretrained( _A , scheduler=_A , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE : List[str] = scheduler_params __SCREAMING_SNAKE_CASE : Tuple = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE : List[Any] = jax.device_count() __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt] __SCREAMING_SNAKE_CASE : Any = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = replicate(_A ) __SCREAMING_SNAKE_CASE : List[str] = shard(_A ) __SCREAMING_SNAKE_CASE : int = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE : Dict = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] ) print(F'''output_slice: {output_slice}''' ) 
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
74
1
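The Flax pipeline tests above spread one prompt batch and one RNG across all available accelerators. A minimal sketch of that preparation using plain jax; flax's shard() and replicate() helpers perform the equivalent reshape and broadcast, and the prompt tensor here is a placeholder.

import jax
import jax.numpy as jnp

num_devices = jax.device_count()
rng = jax.random.PRNGKey(0)
per_device_rngs = jax.random.split(rng, num_devices)  # one independent key per device

prompt_ids = jnp.zeros((num_devices * 1, 77), dtype=jnp.int32)  # fake tokenized prompts
sharded = prompt_ids.reshape(num_devices, -1, prompt_ids.shape[-1])  # what shard() does
print(per_device_rngs.shape, sharded.shape)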
from itertools import count


def solution(min_block_length: int = 50) -> int:
    """Return the least row length n for which the block fill-count first exceeds one million."""
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n


if __name__ == "__main__":
    print(f"{solution() = }")
74
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) lowercase_ = { """configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""], """processing_layoutlmv2""": ["""LayoutLMv2Processor"""], """tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""LayoutLMv2TokenizerFast"""] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""LayoutLMv2FeatureExtractor"""] lowercase_ = ["""LayoutLMv2ImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""", """LayoutLMv2ForQuestionAnswering""", """LayoutLMv2ForSequenceClassification""", """LayoutLMv2ForTokenClassification""", """LayoutLMv2Layer""", """LayoutLMv2Model""", """LayoutLMv2PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
1
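The block-counting function at the start of this record (count the ways to place non-overlapping blocks of length at least m, separated by at least one empty cell, in a row of n cells) can be sanity-checked against a brute-force enumeration for tiny inputs. The brute force below is written from the problem statement, not taken from any source; for n = 7 and m = 3 both approaches give 17.

def brute_force_fill_count(row_length: int, min_block_length: int) -> int:
    """Count fillings of a row with blocks of length >= min_block_length, adjacent blocks separated by a gap."""

    def count_from(position: int) -> int:
        total = 1  # leave everything from `position` onward empty
        for start in range(position, row_length):
            for length in range(min_block_length, row_length - start + 1):
                total += count_from(start + length + 1)  # +1 enforces the separating gap
        return total

    return count_from(0)

print(brute_force_fill_count(7, 3))  # 17, matching the dynamic-programming table above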
import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate lowercase_ = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow("""""", """|""", """|"""), datarow=DataRow("""""", """|""", """|"""), padding=1, with_header_hide=None, ) lowercase_ = [] lowercase_ = [] lowercase_ = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}} lowercase_ = [ { """type""": """header""", """text""": { """type""": """plain_text""", """text""": f'''🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results''', """emoji""": True, }, } ] lowercase_ = 0 for log in Path().glob("""*.log"""): lowercase_ = 0 with open(log, """r""") as f: for line in f: lowercase_ = json.loads(line) if line.get("""nodeid""", """""") != "": lowercase_ = line["""nodeid"""] if line.get("""duration""", None) is not None: lowercase_ = f'''{line['duration']:.4f}''' if line.get("""outcome""", """""") == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split("""_""")[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) lowercase_ = [] log.unlink() lowercase_ = """""" lowercase_ = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += f"*{name[1:]}: {num_failed} failed test*\n" else: message += f"*{name[1:]}: {num_failed} failed tests*\n" lowercase_ = [] lowercase_ = {} for test in failed_tests: lowercase_ = test[0].split("""::""") lowercase_ = data[0].split("""/""")[-1] if data[0] not in filesafailed: lowercase_ = [data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) lowercase_ = [test[0] for test in failed_table] lowercase_ = list(set(files)) # Count number of instances in failed_tests lowercase_ = [] for file in individual_files: table.append([file, len(filesafailed[file])]) lowercase_ = tabulate( table, headers=["""Test Location""", """Num Failed"""], tablefmt=hf_table_format, stralign="""right""", ) message += f"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 3_000: lowercase_ = """Too many failed tests, please see the full report in the Action results.""" lowercase_ = len(err) + 10 lowercase_ = message[: 3_000 - offset] + f'''\n...\n```\n{err}''' print(f'''### {message}''') else: lowercase_ = """No failed tests! 🤗""" print(f'''## {message}''') payload.append(no_error_payload) if os.environ.get("""TEST_TYPE""", """""") != "": from slack_sdk import WebClient lowercase_ = WebClient(token=os.environ["""SLACK_API_TOKEN"""]) if message != "No failed tests! 
🤗": lowercase_ = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": message, }, } payload.append(md_report) lowercase_ = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": """*For more details:*""", }, """accessory""": { """type""": """button""", """text""": { """type""": """plain_text""", """text""": """Check Action results""", """emoji""": True, }, """url""": f'''https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}''', }, } payload.append(action_button) lowercase_ = { """type""": """context""", """elements""": [ { """type""": """plain_text""", """text""": f'''Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}''', } ], } payload.append(date_report) lowercase_ = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload) lowercase_ = response.data["""ts"""] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name lowercase_ = """""" for i, row in enumerate(test_failures): if row[0] != test_class: lowercase_ = row[0] else: lowercase_ = """""" lowercase_ = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": f'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```''', }, } client.chat_postMessage( channel="""#accelerate-ci-daily""", thread_ts=ts, blocks=[payload], )
74
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = MobileBertTokenizer lowerCAmelCase_ = MobileBertTokenizerFast lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = filter_non_english lowerCAmelCase_ = '''google/mobilebert-uncased''' def UpperCAmelCase__ ( self : Dict ): """simple docstring""" super().setUp() __SCREAMING_SNAKE_CASE : List[str] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __SCREAMING_SNAKE_CASE : int = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : List[str] = '''unwanted, running''' return input_text, output_text def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class(self.vocab_file ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(_A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] ) def UpperCAmelCase__ ( self : int ): """simple docstring""" if not self.test_rust_tokenizer: return __SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : str = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : str = tokenizer.encode(_A ) __SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) # With lower casing __SCREAMING_SNAKE_CASE : Any = self.get_tokenizer(do_lower_case=_A ) __SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer(do_lower_case=_A ) __SCREAMING_SNAKE_CASE : List[str] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) 
__SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
[UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] __SCREAMING_SNAKE_CASE : Dict = {} for i, token in enumerate(_A ): __SCREAMING_SNAKE_CASE : List[str] = i __SCREAMING_SNAKE_CASE : str = WordpieceTokenizer(vocab=_A , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def UpperCAmelCase__ ( self : str ): """simple docstring""" self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) self.assertListEqual( [rust_tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) @slow def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : str = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r.encode_plus( _A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , 
add_special_tokens=_A , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.do_lower_case if hasattr(_A , '''do_lower_case''' ) else False __SCREAMING_SNAKE_CASE : Optional[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''A'''), ((1, 2), ''','''), ((3, 5), '''na'''), ((5, 6), '''##ï'''), ((6, 8), '''##ve'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''Allen'''), ((21, 23), '''##NL'''), ((23, 24), '''##P'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''a'''), ((1, 2), ''','''), ((3, 8), '''naive'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''allen'''), ((21, 23), '''##nl'''), ((23, 24), '''##p'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = ['''的''', '''人''', '''有'''] __SCREAMING_SNAKE_CASE : int = ''''''.join(_A ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __SCREAMING_SNAKE_CASE : str = True __SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.convert_ids_to_tokens(_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_A , _A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Optional[Any] = False __SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A ) # it is expected that only the first Chinese character is not preceded by "##". __SCREAMING_SNAKE_CASE : List[Any] = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(_A ) ] self.assertListEqual(_A , _A ) self.assertListEqual(_A , _A )
74
1
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class __UpperCamelCase ( lowerCAmelCase__ ):
    """simple docstring"""

    lowerCAmelCase_ = ['''image_processor''', '''tokenizer''']
    lowerCAmelCase_ = '''CLIPImageProcessor'''
    lowerCAmelCase_ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')

    def __init__( self : Dict , _A : List[str]=None , _A : List[Any]=None , **_A : Any ):
        """simple docstring"""
        __SCREAMING_SNAKE_CASE : Dict = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , _A , )
            __SCREAMING_SNAKE_CASE : str = kwargs.pop('''feature_extractor''' )

        __SCREAMING_SNAKE_CASE : Optional[Any] = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )

        super().__init__(_A , _A )

    def __call__( self : List[str] , _A : Union[str, Any]=None , _A : Union[str, Any]=None , _A : Tuple=None , **_A : Optional[int] ):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )

        if text is not None:
            __SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(_A , return_tensors=_A , **_A )

        if images is not None:
            __SCREAMING_SNAKE_CASE : str = self.image_processor(_A , return_tensors=_A , **_A )

        if text is not None and images is not None:
            __SCREAMING_SNAKE_CASE : Optional[Any] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**_A ) , tensor_type=_A )

    def UpperCAmelCase__ ( self : List[Any] , *_A : Tuple , **_A : Any ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*_A , **_A )

    def UpperCAmelCase__ ( self : Optional[int] , *_A : Optional[Any] , **_A : Optional[Any] ):
        """simple docstring"""
        return self.tokenizer.decode(*_A , **_A )

    @property
    def UpperCAmelCase__ ( self : Optional[Any] ):
        """simple docstring"""
        __SCREAMING_SNAKE_CASE : Dict = self.tokenizer.model_input_names
        __SCREAMING_SNAKE_CASE : List[Any] = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def UpperCAmelCase__ ( self : List[str] ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _A , )
        return self.image_processor_class

    @property
    def UpperCAmelCase__ ( self : Any ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _A , )
        return self.image_processor
74
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


lowercase_ = logging.get_logger(__name__)


class __UpperCamelCase ( lowerCAmelCase__ ):
    """simple docstring"""

    def __init__( self : Tuple , *_A : Optional[int] , **_A : Tuple ):
        """simple docstring"""
        warnings.warn(
            '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use MobileViTImageProcessor instead.''' , _A , )
        super().__init__(*_A , **_A )
74
1
import argparse
import json
import os

import torch
from transformers.file_utils import has_file

from diffusers import UNetaDConditionModel, UNetaDModel


lowercase_ = False
lowercase_ = True
lowercase_ = False

if __name__ == "__main__":
    lowercase_ = argparse.ArgumentParser()
    parser.add_argument(
        """--repo_path""",
        default=None,
        type=str,
        required=True,
        help="""The config json file corresponding to the architecture.""",
    )
    parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    lowercase_ = parser.parse_args()

    lowercase_ = {
        """image_size""": """sample_size""",
        """num_res_blocks""": """layers_per_block""",
        """block_channels""": """block_out_channels""",
        """down_blocks""": """down_block_types""",
        """up_blocks""": """up_block_types""",
        """downscale_freq_shift""": """freq_shift""",
        """resnet_num_groups""": """norm_num_groups""",
        """resnet_act_fn""": """act_fn""",
        """resnet_eps""": """norm_eps""",
        """num_head_channels""": """attention_head_dim""",
    }

    lowercase_ = {
        """time_steps""": """time_proj""",
        """mid""": """mid_block""",
        """downsample_blocks""": """down_blocks""",
        """upsample_blocks""": """up_blocks""",
    }

    lowercase_ = """""" if has_file(args.repo_path, """config.json""") else """unet"""

    with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
        lowercase_ = reader.read()
        lowercase_ = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, """config.json"""):
        lowercase_ = UNetaDModel(**config)
    else:
        lowercase_ = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel
        lowercase_ = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    lowercase_ = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                lowercase_ = config[key]
                del config[key]

        lowercase_ = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
        lowercase_ = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]

    if do_only_weights:
        lowercase_ = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))

        lowercase_ = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
                continue
            lowercase_ = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(""".""")[0] == key:
                    lowercase_ = param_value
                    lowercase_ = True
            if not has_changed:
                lowercase_ = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
74
import itertools
from dataclasses import dataclass
from typing import List, Optional

import pyarrow as pa
import pyarrow.parquet as pq

import datasets
from datasets.table import table_cast


lowercase_ = datasets.utils.logging.get_logger(__name__)


@dataclass
class __UpperCamelCase ( datasets.BuilderConfig ):
    """simple docstring"""

    lowerCAmelCase_ = 1_00_00
    lowerCAmelCase_ = None
    lowerCAmelCase_ = None


class __UpperCamelCase ( datasets.ArrowBasedBuilder ):
    """simple docstring"""

    lowerCAmelCase_ = ParquetConfig

    def UpperCAmelCase__ ( self : Any ):
        """simple docstring"""
        return datasets.DatasetInfo(features=self.config.features )

    def UpperCAmelCase__ ( self : Any , _A : Optional[Any] ):
        """simple docstring"""
        if not self.config.data_files:
            raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        __SCREAMING_SNAKE_CASE : List[str] = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(_A , (str, list, tuple) ):
            __SCREAMING_SNAKE_CASE : Tuple = data_files
            if isinstance(_A , _A ):
                __SCREAMING_SNAKE_CASE : Optional[int] = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            __SCREAMING_SNAKE_CASE : List[Any] = [dl_manager.iter_files(_A ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        __SCREAMING_SNAKE_CASE : int = []
        for split_name, files in data_files.items():
            if isinstance(_A , _A ):
                __SCREAMING_SNAKE_CASE : Any = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            __SCREAMING_SNAKE_CASE : Optional[int] = [dl_manager.iter_files(_A ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(_A ):
                    with open(_A , '''rb''' ) as f:
                        __SCREAMING_SNAKE_CASE : Dict = datasets.Features.from_arrow_schema(pq.read_schema(_A ) )
                    break
            splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'''files''': files} ) )
        return splits

    def UpperCAmelCase__ ( self : str , _A : pa.Table ):
        """simple docstring"""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            __SCREAMING_SNAKE_CASE : str = table_cast(_A , self.info.features.arrow_schema )
        return pa_table

    def UpperCAmelCase__ ( self : Tuple , _A : str ):
        """simple docstring"""
        __SCREAMING_SNAKE_CASE : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\''''
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(_A ) ):
            with open(_A , '''rb''' ) as f:
                __SCREAMING_SNAKE_CASE : str = pq.ParquetFile(_A )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns )
                    ):
                        __SCREAMING_SNAKE_CASE : Optional[Any] = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield F'''{file_idx}_{batch_idx}''', self._cast_table(_A )
                except ValueError as e:
                    logger.error(F'''Failed to read file \'{file}\' with error {type(_A )}: {e}''' )
                    raise
74
1
from __future__ import annotations


def a__ ( snake_case ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE : Optional[int] = [True] * limit
    __SCREAMING_SNAKE_CASE : Union[str, Any] = False
    __SCREAMING_SNAKE_CASE : Any = False
    __SCREAMING_SNAKE_CASE : List[Any] = True

    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        __SCREAMING_SNAKE_CASE : Any = i * 2
        while index < limit:
            __SCREAMING_SNAKE_CASE : int = False
            __SCREAMING_SNAKE_CASE : int = index + i

    __SCREAMING_SNAKE_CASE : List[str] = [2]

    for i in range(3 , snake_case , 2 ):
        if is_prime[i]:
            primes.append(snake_case )

    return primes


def a__ ( snake_case = 1_000_000 ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE : int = prime_sieve(snake_case )
    __SCREAMING_SNAKE_CASE : Optional[Any] = 0
    __SCREAMING_SNAKE_CASE : Any = 0

    for i in range(len(snake_case ) ):
        for j in range(i + length , len(snake_case ) ):
            __SCREAMING_SNAKE_CASE : Any = sum(primes[i:j] )
            if sol >= ceiling:
                break

            if sol in primes:
                __SCREAMING_SNAKE_CASE : Optional[int] = j - i
                __SCREAMING_SNAKE_CASE : List[Any] = sol

    return largest


if __name__ == "__main__":
    print(f'''{solution() = }''')
74
from math import isclose, sqrt


def a__ ( snake_case , snake_case , snake_case ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE : Optional[Any] = point_y / 4 / point_x
    __SCREAMING_SNAKE_CASE : int = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    __SCREAMING_SNAKE_CASE : Tuple = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    __SCREAMING_SNAKE_CASE : int = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    __SCREAMING_SNAKE_CASE : int = outgoing_gradient**2 + 4
    __SCREAMING_SNAKE_CASE : List[str] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    __SCREAMING_SNAKE_CASE : Optional[Any] = (point_y - outgoing_gradient * point_x) ** 2 - 100

    __SCREAMING_SNAKE_CASE : str = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    __SCREAMING_SNAKE_CASE : int = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    __SCREAMING_SNAKE_CASE : Dict = x_minus if isclose(snake_case , snake_case ) else x_plus
    __SCREAMING_SNAKE_CASE : Dict = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def a__ ( snake_case = 1.4 , snake_case = -9.6 ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE : int = 0
    __SCREAMING_SNAKE_CASE : float = first_x_coord
    __SCREAMING_SNAKE_CASE : float = first_y_coord
    __SCREAMING_SNAKE_CASE : float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = next_point(snake_case , snake_case , snake_case )
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f'''{solution() = }''')
74
1
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def a__ ( snake_case , snake_case , snake_case , snake_case ): """simple docstring""" for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})''' def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case=True ): """simple docstring""" model.train() __SCREAMING_SNAKE_CASE : Union[str, Any] = model(snake_case ) __SCREAMING_SNAKE_CASE : Dict = F.mse_loss(snake_case , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(snake_case ) def a__ ( snake_case , snake_case=False ): """simple docstring""" set_seed(42 ) __SCREAMING_SNAKE_CASE : str = RegressionModel() __SCREAMING_SNAKE_CASE : Optional[int] = deepcopy(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = RegressionDataset(length=80 ) __SCREAMING_SNAKE_CASE : Optional[Any] = DataLoader(snake_case , batch_size=16 ) model.to(accelerator.device ) if sched: __SCREAMING_SNAKE_CASE : Tuple = AdamW(params=model.parameters() , lr=1E-3 ) __SCREAMING_SNAKE_CASE : Any = AdamW(params=ddp_model.parameters() , lr=1E-3 ) __SCREAMING_SNAKE_CASE : List[Any] = LambdaLR(snake_case , lr_lambda=lambda snake_case : epoch**0.65 ) __SCREAMING_SNAKE_CASE : Any = LambdaLR(snake_case , lr_lambda=lambda snake_case : epoch**0.65 ) # Make a copy of `model` if sched: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare(snake_case , snake_case , snake_case , snake_case ) else: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = accelerator.prepare(snake_case , snake_case ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def a__ ( snake_case ): """simple docstring""" # Test when on a single CPU or GPU that the context manager does nothing __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = get_training_setup(snake_case ) # Use a single batch __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = next(iter(snake_case ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = accelerator.gather((ddp_input, ddp_target) ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case , snake_case , snake_case , snake_case ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with 
accelerator.no_sync(snake_case ): step_model(snake_case , snake_case , snake_case , snake_case ) else: # Sync grads step_model(snake_case , snake_case , snake_case , snake_case ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(snake_case , snake_case , snake_case , snake_case ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) __SCREAMING_SNAKE_CASE : Optional[int] = ddp_input[torch.randperm(len(snake_case ) )] def a__ ( snake_case ): """simple docstring""" # Test on distributed setup that context manager behaves properly __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = get_training_setup(snake_case ) # Use a single batch __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = next(iter(snake_case ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = accelerator.gather((ddp_input, ddp_target) ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case , snake_case , snake_case , snake_case ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(snake_case ): step_model(snake_case , snake_case , snake_case , snake_case ) else: # Sync grads step_model(snake_case , snake_case , snake_case , snake_case ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) __SCREAMING_SNAKE_CASE : List[str] = ddp_input[torch.randperm(len(snake_case ) )] def a__ ( snake_case=False , snake_case=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = Accelerator( split_batches=snake_case , dispatch_batches=snake_case , gradient_accumulation_steps=2 ) # Test that context manager behaves properly __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = get_training_setup(snake_case ) for iteration, batch in enumerate(snake_case ): __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = batch.values() # Gather the distributed inputs and targs for the base model __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.gather((ddp_input, ddp_target) ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case , snake_case , snake_case , snake_case , snake_case ) # Do "gradient accumulation" (noop) with 
accelerator.accumulate(snake_case ): step_model(snake_case , snake_case , snake_case , snake_case ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) __SCREAMING_SNAKE_CASE : Optional[Any] = ddp_input[torch.randperm(len(snake_case ) )] GradientState._reset_state() def a__ ( snake_case=False , snake_case=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = Accelerator( split_batches=snake_case , dispatch_batches=snake_case , gradient_accumulation_steps=2 ) # Test that context manager behaves properly __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = get_training_setup(snake_case , snake_case ) for iteration, batch in enumerate(snake_case ): __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = batch.values() # Gather the distributed inputs and targs for the base model __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = accelerator.gather((ddp_input, ddp_target) ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(snake_case , snake_case , snake_case , snake_case , snake_case ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(snake_case ): step_model(snake_case , snake_case , snake_case , snake_case ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n''' __SCREAMING_SNAKE_CASE : List[Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case )) if accelerator.num_processes > 1: check_model_parameters(snake_case , snake_case , snake_case , snake_case ) # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) GradientState._reset_state() def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = Accelerator() __SCREAMING_SNAKE_CASE : int = RegressionDataset(length=80 ) __SCREAMING_SNAKE_CASE : Dict = DataLoader(snake_case , batch_size=16 ) __SCREAMING_SNAKE_CASE : str = RegressionDataset(length=96 ) __SCREAMING_SNAKE_CASE : List[str] = DataLoader(snake_case , batch_size=16 ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = accelerator.prepare(snake_case , snake_case ) assert accelerator.gradient_state.active_dataloader is None for 
iteration, _ in enumerate(snake_case ): assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case ) if iteration < len(snake_case ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(snake_case ): assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case ) if batch_num < len(snake_case ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = Accelerator() __SCREAMING_SNAKE_CASE : Optional[int] = accelerator.state if state.local_process_index == 0: print('''**Test `accumulate` gradient accumulation with dataloader break**''' ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print('''**Test NOOP `no_sync` context manager**''' ) test_noop_sync(snake_case ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print('''**Test Distributed `no_sync` context manager**''' ) test_distributed_sync(snake_case ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation, ''' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation(snake_case , snake_case ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation_with_opt_and_scheduler(snake_case , snake_case ) def a__ ( snake_case ): """simple docstring""" # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
74
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Any , _A : int , _A : Any=7 , _A : List[str]=3 , _A : Optional[Any]=18 , _A : List[str]=30 , _A : Optional[Any]=400 , _A : Any=True , _A : List[str]=None , _A : Union[str, Any]=True , _A : Optional[int]=None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = size if size is not None else {'''shortest_edge''': 20} __SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __SCREAMING_SNAKE_CASE : int = parent __SCREAMING_SNAKE_CASE : Optional[int] = batch_size __SCREAMING_SNAKE_CASE : Optional[Any] = num_channels __SCREAMING_SNAKE_CASE : List[str] = image_size __SCREAMING_SNAKE_CASE : int = min_resolution __SCREAMING_SNAKE_CASE : Optional[int] = max_resolution __SCREAMING_SNAKE_CASE : List[Any] = do_resize __SCREAMING_SNAKE_CASE : Union[str, Any] = size __SCREAMING_SNAKE_CASE : str = do_center_crop __SCREAMING_SNAKE_CASE : Any = crop_size def UpperCAmelCase__ ( self : Dict ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = MobileNetVaImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = MobileNetVaImageProcessingTester(self ) @property def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) self.assertTrue(hasattr(_A , '''do_center_crop''' ) ) self.assertTrue(hasattr(_A , '''crop_size''' ) ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def UpperCAmelCase__ ( self : int ): """simple docstring""" pass def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] 
, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : Dict = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
74
1
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey


class __UpperCamelCase :
    """simple docstring"""

    def __init__( self : Tuple , _A : List[str] , _A : int , _A : int ):
        """simple docstring"""
        if dst_width < 0 or dst_height < 0:
            raise ValueError('''Destination width/height should be > 0''' )

        __SCREAMING_SNAKE_CASE : Dict = img
        __SCREAMING_SNAKE_CASE : Optional[int] = img.shape[1]
        __SCREAMING_SNAKE_CASE : Tuple = img.shape[0]
        __SCREAMING_SNAKE_CASE : List[str] = dst_width
        __SCREAMING_SNAKE_CASE : Tuple = dst_height

        __SCREAMING_SNAKE_CASE : Tuple = self.src_w / self.dst_w
        __SCREAMING_SNAKE_CASE : Dict = self.src_h / self.dst_h

        __SCREAMING_SNAKE_CASE : Optional[int] = (
            np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 255
        )

    def UpperCAmelCase__ ( self : Optional[Any] ):
        """simple docstring"""
        for i in range(self.dst_h ):
            for j in range(self.dst_w ):
                __SCREAMING_SNAKE_CASE : str = self.img[self.get_y(_A )][self.get_x(_A )]

    def UpperCAmelCase__ ( self : Tuple , _A : int ):
        """simple docstring"""
        return int(self.ratio_x * x )

    def UpperCAmelCase__ ( self : Any , _A : int ):
        """simple docstring"""
        return int(self.ratio_y * y )


if __name__ == "__main__":
    lowercase_ , lowercase_ = 800, 600
    lowercase_ = imread("""image_data/lena.jpg""", 1)
    lowercase_ = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
    )
    waitKey(0)
    destroyAllWindows()
74
def a__ ( snake_case ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE : List[str] = [0 for i in range(len(snake_case ) )]

    # initialize interval's left pointer and right pointer
    __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = 0, 0

    for i in range(1 , len(snake_case ) ):
        # case when current index is inside the interval
        if i <= right_pointer:
            __SCREAMING_SNAKE_CASE : List[Any] = min(right_pointer - i + 1 , z_result[i - left_pointer] )
            __SCREAMING_SNAKE_CASE : Dict = min_edge

        while go_next(snake_case , snake_case , snake_case ):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = i, i + z_result[i] - 1

    return z_result


def a__ ( snake_case , snake_case , snake_case ):
    """simple docstring"""
    return i + z_result[i] < len(snake_case ) and s[z_result[i]] == s[i + z_result[i]]


def a__ ( snake_case , snake_case ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE : str = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    __SCREAMING_SNAKE_CASE : str = z_function(pattern + input_str )

    for val in z_result:
        # if value is greater than the length of the pattern string
        # that means this index is the starting position of a substring
        # which is equal to the pattern string
        if val >= len(snake_case ):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
74
1
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = { '''en''': '''Machine learning is great, isn\'t it?''', '''ru''': '''Машинное обучение - это здорово, не так ли?''', '''de''': '''Maschinelles Lernen ist großartig, oder?''', } # BLUE scores as follows: # "pair": [fairseq, transformers] __SCREAMING_SNAKE_CASE : Any = { '''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''], '''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''], '''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''], '''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''], } __SCREAMING_SNAKE_CASE : Dict = F'''{src_lang}-{tgt_lang}''' __SCREAMING_SNAKE_CASE : Tuple = F''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt19 - facebook license: apache-2.0 datasets: - wmt19 metrics: - bleu --- # FSMT ## Model description This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}. For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616). The abbreviation FSMT stands for FairSeqMachineTranslation All four models are available: * [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) * [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en) * [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de) * [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "facebook/wmt19-{src_lang}-{tgt_lang}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias - The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981) ## Training data Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616). ## Eval results pair | fairseq | transformers -------|---------|---------- {pair} | {scores[pair][0]} | {scores[pair][1]} The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support: - model ensemble, therefore the best performing checkpoint was ported (``model4.pt``). 
- re-ranking The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=15 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`. ## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ```bibtex @inproceedings{{..., year={{2020}}, title={{Facebook FAIR\'s WMT19 News Translation Task Submission}}, author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}}, booktitle={{Proc. of WMT}}, }} ``` ## TODO - port model ensemble (fairseq uses 4 model checkpoints) ''' os.makedirs(snake_case , exist_ok=snake_case ) __SCREAMING_SNAKE_CASE : List[str] = os.path.join(snake_case , '''README.md''' ) print(F'''Generating {path}''' ) with open(snake_case , '''w''' , encoding='''utf-8''' ) as f: f.write(snake_case ) # make sure we are under the root of the project lowercase_ = Path(__file__).resolve().parent.parent.parent lowercase_ = repo_dir / """model_cards""" for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: lowercase_ , lowercase_ , lowercase_ = model_name.split("""-""") lowercase_ = model_cards_dir / """facebook""" / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
74
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


lowercase_ = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        """SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """SwinForImageClassification""",
        """SwinForMaskedImageModeling""",
        """SwinModel""",
        """SwinPreTrainedModel""",
        """SwinBackbone""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        """TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFSwinForImageClassification""",
        """TFSwinForMaskedImageModeling""",
        """TFSwinModel""",
        """TFSwinPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    import sys

    lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
1
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    lowercase_ = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation="""relu""")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.ConvaD(32, (3, 3), activation="""relu"""))
    classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="""relu"""))
    classifier.add(layers.Dense(units=1, activation="""sigmoid"""))

    # Compiling the CNN
    classifier.compile(
        optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""]
    )

    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights

    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    lowercase_ = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    lowercase_ = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    lowercase_ = train_datagen.flow_from_directory(
        """dataset/training_set""", target_size=(64, 64), batch_size=32, class_mode="""binary"""
    )

    lowercase_ = test_datagen.flow_from_directory(
        """dataset/test_set""", target_size=(64, 64), batch_size=32, class_mode="""binary"""
    )

    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )

    classifier.save("""cnn.h5""")

    # Part 3 - Making new predictions

    lowercase_ = tf.keras.preprocessing.image.load_img(
        """dataset/single_prediction/image.png""", target_size=(64, 64)
    )
    lowercase_ = tf.keras.preprocessing.image.img_to_array(test_image)
    lowercase_ = np.expand_dims(test_image, axis=0)
    lowercase_ = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        lowercase_ = """Normal"""
    if result[0][0] == 1:
        lowercase_ = """Abnormality detected"""
74
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = XCLIPTextConfig() # derive patch size from model name __SCREAMING_SNAKE_CASE : Tuple = model_name.find('''patch''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPVisionConfig(patch_size=snake_case , num_frames=snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 __SCREAMING_SNAKE_CASE : Optional[Any] = 12 __SCREAMING_SNAKE_CASE : Optional[Any] = 1_024 __SCREAMING_SNAKE_CASE : int = 4_096 __SCREAMING_SNAKE_CASE : Tuple = 16 __SCREAMING_SNAKE_CASE : Optional[int] = 24 __SCREAMING_SNAKE_CASE : Optional[int] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 if model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Any = 336 __SCREAMING_SNAKE_CASE : Any = XCLIPConfig.from_text_vision_configs(snake_case , snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Any = 768 return config def a__ ( snake_case ): """simple docstring""" # text encoder if name == "token_embedding.weight": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' ) if name == "positional_embedding": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' ) if "ln_1" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''ln_1''' , '''layer_norm1''' ) if "ln_2" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''ln_2''' , '''layer_norm2''' ) if "c_fc" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''c_fc''' , '''fc1''' ) if "c_proj" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''c_proj''' , '''fc2''' ) if name.startswith('''transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : Any = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' ) if "attn.out_proj" in name and "message" not in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' ) if "ln_final" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''ln_final''' , '''text_model.final_layer_norm''' ) # visual encoder if name == "visual.class_embedding": __SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' ) if name == "visual.positional_embedding": __SCREAMING_SNAKE_CASE : Tuple = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' ) if name.startswith('''visual.transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : List[Any] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' ) if "visual.conv1" in name: __SCREAMING_SNAKE_CASE : Any = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' ) if "visual.ln_pre" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' ) if "visual.ln_post" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''visual.ln_post''' , 
'''vision_model.post_layernorm''' ) if "visual.proj" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''visual.proj''' , '''visual_projection.weight''' ) if "text_projection" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''text_projection''' , '''text_projection.weight''' ) # things on top if "prompts_visual_proj" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' ) if "prompts_visual_ln" in name: __SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' ) # mit if name == "mit.positional_embedding": __SCREAMING_SNAKE_CASE : Any = name.replace('''positional''' , '''position''' ) if name.startswith('''mit.resblocks''' ): __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' ) # prompts generator if name.startswith('''prompts_generator.norm''' ): __SCREAMING_SNAKE_CASE : Tuple = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' ) return name def a__ ( snake_case , snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): __SCREAMING_SNAKE_CASE : Tuple = orig_state_dict.pop(snake_case ) if "attn.in_proj" in key: __SCREAMING_SNAKE_CASE : Optional[Any] = key.split('''.''' ) if key.startswith('''visual''' ): __SCREAMING_SNAKE_CASE : List[Any] = key_split[3] __SCREAMING_SNAKE_CASE : Any = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __SCREAMING_SNAKE_CASE : Union[str, Any] = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Optional[Any] = val[ :dim ] __SCREAMING_SNAKE_CASE : Tuple = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim: ] else: if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : str = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Dict = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[-dim:] elif key.startswith('''mit''' ): __SCREAMING_SNAKE_CASE : List[str] = key_split[2] __SCREAMING_SNAKE_CASE : Union[str, Any] = config.vision_config.mit_hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : str = val[:dim, :] __SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :] __SCREAMING_SNAKE_CASE : Optional[int] = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Any = val[:dim] __SCREAMING_SNAKE_CASE : Any = val[dim : dim * 2] __SCREAMING_SNAKE_CASE : Optional[Any] = val[-dim:] else: __SCREAMING_SNAKE_CASE : Optional[Any] = key_split[2] __SCREAMING_SNAKE_CASE : Any = config.text_config.hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[:dim, :] __SCREAMING_SNAKE_CASE : int = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Dict = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Tuple = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : int = val[-dim:] else: __SCREAMING_SNAKE_CASE : int = rename_key(snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __SCREAMING_SNAKE_CASE : int = val.T __SCREAMING_SNAKE_CASE : Union[str, Any] = val return orig_state_dict def a__ ( snake_case ): """simple docstring""" if num_frames == 8: __SCREAMING_SNAKE_CASE : List[Any] = '''eating_spaghetti_8_frames.npy''' elif num_frames == 16: __SCREAMING_SNAKE_CASE : Tuple = '''eating_spaghetti.npy''' 
elif num_frames == 32: __SCREAMING_SNAKE_CASE : Dict = '''eating_spaghetti_32_frames.npy''' __SCREAMING_SNAKE_CASE : List[str] = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename=snake_case , repo_type='''dataset''' , ) __SCREAMING_SNAKE_CASE : int = np.load(snake_case ) return list(snake_case ) def a__ ( snake_case , snake_case=None , snake_case=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = { # fully supervised kinetics-400 checkpoints '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''', '''xclip-base-patch32-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth''' ), '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''', '''xclip-base-patch16-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth''' ), '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''', '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''', # fully supervised kinetics-600 checkpoints '''xclip-base-patch16-kinetics-600''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth''' ), '''xclip-base-patch16-kinetics-600-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth''' ), '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''', # few shot '''xclip-base-patch16-hmdb-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth''' ), '''xclip-base-patch16-hmdb-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth''' ), '''xclip-base-patch16-hmdb-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth''' ), '''xclip-base-patch16-hmdb-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth''' ), '''xclip-base-patch16-ucf-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth''' ), '''xclip-base-patch16-ucf-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth''' ), '''xclip-base-patch16-ucf-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth''' ), '''xclip-base-patch16-ucf-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth''' ), # zero shot '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''', } __SCREAMING_SNAKE_CASE : Optional[Any] = model_to_url[model_name] __SCREAMING_SNAKE_CASE : Any = 8 if "16-frames" in model_name: __SCREAMING_SNAKE_CASE : Optional[int] = 16 elif "shot" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 32 __SCREAMING_SNAKE_CASE : List[str] = get_xclip_config(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPModel(snake_case ) model.eval() if "drive" in checkpoint_url: __SCREAMING_SNAKE_CASE : Union[str, Any] = '''pytorch_model.bin''' gdown.cached_download(snake_case , 
snake_case , quiet=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(snake_case , map_location='''cpu''' )['''model'''] else: __SCREAMING_SNAKE_CASE : str = torch.hub.load_state_dict_from_url(snake_case )['''model'''] __SCREAMING_SNAKE_CASE : List[Any] = convert_state_dict(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = XCLIPModel(snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = model.load_state_dict(snake_case , strict=snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() __SCREAMING_SNAKE_CASE : Any = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224 __SCREAMING_SNAKE_CASE : str = VideoMAEImageProcessor(size=snake_case ) __SCREAMING_SNAKE_CASE : int = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : Optional[int] = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : List[Any] = XCLIPProcessor(image_processor=snake_case , tokenizer=snake_case ) __SCREAMING_SNAKE_CASE : Dict = prepare_video(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = processor( text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=snake_case , return_tensors='''pt''' , padding=snake_case ) print('''Shape of pixel values:''' , inputs.pixel_values.shape ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Optional[Any] = model(**snake_case ) # Verify outputs __SCREAMING_SNAKE_CASE : Dict = outputs.logits_per_video __SCREAMING_SNAKE_CASE : Tuple = logits_per_video.softmax(dim=1 ) print('''Probs:''' , snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] ) elif model_name == "xclip-base-patch16": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] ) elif model_name == "xclip-large-patch14": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": __SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": __SCREAMING_SNAKE_CASE : str = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": __SCREAMING_SNAKE_CASE : int = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": 
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] ) else: raise ValueError(F'''Model name {model_name} not supported''' ) assert torch.allclose(snake_case , snake_case , atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case ) if push_to_hub: print('''Pushing model, processor and slow tokenizer files to the hub...''' ) model.push_to_hub(snake_case , organization='''nielsr''' ) processor.push_to_hub(snake_case , organization='''nielsr''' ) slow_tokenizer.push_to_hub(snake_case , organization='''nielsr''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowercase_ = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
74
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""", """facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""", # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''xlm-roberta-xl''' def __init__( self : Optional[int] , _A : Optional[Any]=25_0880 , _A : Tuple=2560 , _A : Optional[int]=36 , _A : Dict=32 , _A : int=1_0240 , _A : Dict="gelu" , _A : Tuple=0.1 , _A : Tuple=0.1 , _A : List[Any]=514 , _A : List[Any]=1 , _A : Optional[int]=0.02 , _A : Any=1e-05 , _A : str=1 , _A : Tuple=0 , _A : int=2 , _A : Dict="absolute" , _A : Optional[int]=True , _A : int=None , **_A : List[Any] , ): """simple docstring""" super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A ) __SCREAMING_SNAKE_CASE : Any = vocab_size __SCREAMING_SNAKE_CASE : Optional[int] = hidden_size __SCREAMING_SNAKE_CASE : str = num_hidden_layers __SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads __SCREAMING_SNAKE_CASE : List[Any] = hidden_act __SCREAMING_SNAKE_CASE : Tuple = intermediate_size __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob __SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : Any = max_position_embeddings __SCREAMING_SNAKE_CASE : str = type_vocab_size __SCREAMING_SNAKE_CASE : Dict = initializer_range __SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps __SCREAMING_SNAKE_CASE : Optional[Any] = position_embedding_type __SCREAMING_SNAKE_CASE : Dict = use_cache __SCREAMING_SNAKE_CASE : Optional[Any] = classifier_dropout class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" @property def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" if self.task == "multiple-choice": __SCREAMING_SNAKE_CASE : List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __SCREAMING_SNAKE_CASE : Tuple = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
74
from pathlib import Path import fire def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = Path(snake_case ) __SCREAMING_SNAKE_CASE : Dict = Path(snake_case ) dest_dir.mkdir(exist_ok=snake_case ) for path in src_dir.iterdir(): __SCREAMING_SNAKE_CASE : Union[str, Any] = [x.rstrip() for x in list(path.open().readlines() )][:n] __SCREAMING_SNAKE_CASE : Tuple = dest_dir.joinpath(path.name ) print(snake_case ) dest_path.open('''w''' ).write('''\n'''.join(snake_case ) ) if __name__ == "__main__": fire.Fire(minify)
74
1
import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __UpperCamelCase : """simple docstring""" @staticmethod def UpperCAmelCase__ ( *_A : Tuple , **_A : List[str] ): """simple docstring""" pass @is_pipeline_test @require_torch @require_vision class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def UpperCAmelCase__ ( self : Union[str, Any] , _A : List[Any] , _A : List[str] , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' ) __SCREAMING_SNAKE_CASE : str = [ { '''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ), '''question''': '''How many cats are there?''', }, { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''question''': '''How many cats are there?''', }, ] return vqa_pipeline, examples def UpperCAmelCase__ ( self : Any , _A : Dict , _A : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = vqa_pipeline(_A , top_k=1 ) self.assertEqual( _A , [ [{'''score''': ANY(_A ), '''answer''': ANY(_A )}], [{'''score''': ANY(_A ), '''answer''': ANY(_A )}], ] , ) @require_torch def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' ) __SCREAMING_SNAKE_CASE : Dict = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' __SCREAMING_SNAKE_CASE : List[Any] = '''How many cats are there?''' __SCREAMING_SNAKE_CASE : str = vqa_pipeline(image=_A , question='''How many cats are there?''' , top_k=2 ) self.assertEqual( _A , [{'''score''': ANY(_A ), '''answer''': ANY(_A )}, {'''score''': ANY(_A ), '''answer''': ANY(_A )}] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual( _A , [{'''score''': ANY(_A ), '''answer''': ANY(_A )}, {'''score''': ANY(_A ), '''answer''': ANY(_A )}] ) @slow @require_torch def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''' ) __SCREAMING_SNAKE_CASE : Any = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' __SCREAMING_SNAKE_CASE : Optional[int] = '''How many cats are there?''' __SCREAMING_SNAKE_CASE : Union[str, Any] = vqa_pipeline(image=_A , question=_A , top_k=2 ) self.assertEqual( nested_simplify(_A , decimals=4 ) , [{'''score''': 0.87_99, '''answer''': '''2'''}, {'''score''': 0.2_96, '''answer''': '''1'''}] ) __SCREAMING_SNAKE_CASE : Any = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(_A , decimals=4 ) , [{'''score''': 0.87_99, '''answer''': '''2'''}, {'''score''': 0.2_96, '''answer''': '''1'''}] ) __SCREAMING_SNAKE_CASE : Optional[int] = vqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 ) self.assertEqual( nested_simplify(_A , decimals=4 ) , [[{'''score''': 0.87_99, '''answer''': '''2'''}, {'''score''': 
0.2_96, '''answer''': '''1'''}]] * 2 , ) @require_tf @unittest.skip('''Visual question answering not implemented in TF''' ) def UpperCAmelCase__ ( self : str ): """simple docstring""" pass
74
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]] __SCREAMING_SNAKE_CASE : Tuple = DisjunctiveConstraint(_A ) self.assertTrue(isinstance(dc.token_ids , _A ) ) with self.assertRaises(_A ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(_A ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(_A ): DisjunctiveConstraint(_A ) # fails here def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [[1, 2, 3], [1, 2, 4]] __SCREAMING_SNAKE_CASE : Optional[Any] = DisjunctiveConstraint(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = dc.update(1 ) __SCREAMING_SNAKE_CASE : int = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dc.update(2 ) __SCREAMING_SNAKE_CASE : Optional[Any] = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = dc.update(3 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = stepped is True and completed is True and reset is False self.assertTrue(_A ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
74
1
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowercase_ = logging.get_logger(__name__) lowercase_ = { """microsoft/swin-tiny-patch4-window7-224""": ( """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json""" ), # See all Swin models at https://huggingface.co/models?filter=swin } class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''swin''' lowerCAmelCase_ = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : int , _A : int=224 , _A : Any=4 , _A : List[Any]=3 , _A : int=96 , _A : Dict=[2, 2, 6, 2] , _A : Optional[int]=[3, 6, 12, 24] , _A : int=7 , _A : Optional[Any]=4.0 , _A : Optional[int]=True , _A : Optional[int]=0.0 , _A : int=0.0 , _A : Tuple=0.1 , _A : Tuple="gelu" , _A : List[Any]=False , _A : Optional[int]=0.02 , _A : Optional[int]=1e-5 , _A : Any=32 , _A : int=None , _A : Dict=None , **_A : Dict , ): """simple docstring""" super().__init__(**_A ) __SCREAMING_SNAKE_CASE : Any = image_size __SCREAMING_SNAKE_CASE : List[str] = patch_size __SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels __SCREAMING_SNAKE_CASE : Union[str, Any] = embed_dim __SCREAMING_SNAKE_CASE : List[str] = depths __SCREAMING_SNAKE_CASE : List[str] = len(_A ) __SCREAMING_SNAKE_CASE : str = num_heads __SCREAMING_SNAKE_CASE : Optional[int] = window_size __SCREAMING_SNAKE_CASE : Tuple = mlp_ratio __SCREAMING_SNAKE_CASE : Tuple = qkv_bias __SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob __SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : int = drop_path_rate __SCREAMING_SNAKE_CASE : Any = hidden_act __SCREAMING_SNAKE_CASE : Dict = use_absolute_embeddings __SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps __SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range __SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __SCREAMING_SNAKE_CASE : Any = int(embed_dim * 2 ** (len(_A ) - 1) ) __SCREAMING_SNAKE_CASE : Optional[int] = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(_A ) + 1 )] __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = get_aligned_output_features_output_indices( out_features=_A , out_indices=_A , stage_names=self.stage_names ) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = version.parse('''1.11''' ) @property def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" return 1e-4
74
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowercase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") lowercase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) lowercase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''A folder containing the training data.'''} ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''A folder containing the validation data.'''} ) lowerCAmelCase_ = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) lowerCAmelCase_ = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) lowerCAmelCase_ = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = {} if self.train_dir is not None: __SCREAMING_SNAKE_CASE : Dict = self.train_dir if self.validation_dir is not None: __SCREAMING_SNAKE_CASE : Any = self.validation_dir __SCREAMING_SNAKE_CASE : List[Any] = data_files if data_files else None @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(lowerCAmelCase__ )} , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) lowerCAmelCase_ = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class __UpperCamelCase : """simple docstring""" def __init__( self : Tuple , _A : Optional[int]=192 , _A : List[Any]=32 , _A : Optional[int]=4 , _A : str=0.6 ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = input_size __SCREAMING_SNAKE_CASE : List[str] = mask_patch_size __SCREAMING_SNAKE_CASE : Dict = model_patch_size __SCREAMING_SNAKE_CASE : int = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError('''Input size must be divisible by mask patch size''' ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError('''Mask patch size must be divisible by model patch size''' ) __SCREAMING_SNAKE_CASE : Any = self.input_size // self.mask_patch_size __SCREAMING_SNAKE_CASE : Optional[Any] = self.mask_patch_size // self.model_patch_size __SCREAMING_SNAKE_CASE : int = self.rand_size**2 __SCREAMING_SNAKE_CASE : Optional[int] = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = np.random.permutation(self.token_count )[: self.mask_count] __SCREAMING_SNAKE_CASE : Union[str, Any] = np.zeros(self.token_count , dtype=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = 1 __SCREAMING_SNAKE_CASE : List[str] = mask.reshape((self.rand_size, self.rand_size) ) __SCREAMING_SNAKE_CASE : List[Any] = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack([example['''pixel_values'''] for example in examples] ) __SCREAMING_SNAKE_CASE : Any = torch.stack([example['''mask'''] for example in examples] ) return {"pixel_values": pixel_values, 
"bool_masked_pos": mask} def a__ ( ): """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __SCREAMING_SNAKE_CASE : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_mim''' , snake_case , snake_case ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = training_args.get_process_log_level() logger.setLevel(snake_case ) transformers.utils.logging.set_verbosity(snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. __SCREAMING_SNAKE_CASE : Tuple = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __SCREAMING_SNAKE_CASE : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset. __SCREAMING_SNAKE_CASE : Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. 
__SCREAMING_SNAKE_CASE : Any = None if '''validation''' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , snake_case ) and data_args.train_val_split > 0.0: __SCREAMING_SNAKE_CASE : List[str] = ds['''train'''].train_test_split(data_args.train_val_split ) __SCREAMING_SNAKE_CASE : int = split['''train'''] __SCREAMING_SNAKE_CASE : Dict = split['''test'''] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __SCREAMING_SNAKE_CASE : List[Any] = { '''cache_dir''': model_args.cache_dir, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.config_name_or_path: __SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(model_args.config_name_or_path , **snake_case ) elif model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **snake_case ) else: __SCREAMING_SNAKE_CASE : List[Any] = CONFIG_MAPPING[model_args.model_type]() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(snake_case , '''decoder_type''' ): __SCREAMING_SNAKE_CASE : Any = '''simmim''' # adapt config __SCREAMING_SNAKE_CASE : str = model_args.image_size if model_args.image_size is not None else config.image_size __SCREAMING_SNAKE_CASE : int = model_args.patch_size if model_args.patch_size is not None else config.patch_size __SCREAMING_SNAKE_CASE : str = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { '''image_size''': model_args.image_size, '''patch_size''': model_args.patch_size, '''encoder_stride''': model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: __SCREAMING_SNAKE_CASE : int = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case ) elif model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case ) else: __SCREAMING_SNAKE_CASE : List[Any] = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } __SCREAMING_SNAKE_CASE : str = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : int = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('''Training new model from scratch''' ) __SCREAMING_SNAKE_CASE : List[Any] = AutoModelForMaskedImageModeling.from_config(snake_case ) if training_args.do_train: __SCREAMING_SNAKE_CASE : Any = ds['''train'''].column_names else: __SCREAMING_SNAKE_CASE : int = ds['''validation'''].column_names if data_args.image_column_name is not None: __SCREAMING_SNAKE_CASE : List[Any] = data_args.image_column_name elif "image" in column_names: __SCREAMING_SNAKE_CASE : str = '''image''' elif "img" in column_names: 
__SCREAMING_SNAKE_CASE : List[str] = '''img''' else: __SCREAMING_SNAKE_CASE : Tuple = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py __SCREAMING_SNAKE_CASE : Any = Compose( [ Lambda(lambda snake_case : img.convert('''RGB''' ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator __SCREAMING_SNAKE_CASE : str = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(snake_case ): __SCREAMING_SNAKE_CASE : str = [transforms(snake_case ) for image in examples[image_column_name]] __SCREAMING_SNAKE_CASE : str = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError('''--do_train requires a train dataset''' ) if data_args.max_train_samples is not None: __SCREAMING_SNAKE_CASE : Dict = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError('''--do_eval requires a validation dataset''' ) if data_args.max_eval_samples is not None: __SCREAMING_SNAKE_CASE : Union[str, Any] = ( ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(snake_case ) # Initialize our trainer __SCREAMING_SNAKE_CASE : List[str] = Trainer( model=snake_case , args=snake_case , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=snake_case , data_collator=snake_case , ) # Training if training_args.do_train: __SCREAMING_SNAKE_CASE : Union[str, Any] = None if training_args.resume_from_checkpoint is not None: __SCREAMING_SNAKE_CASE : Tuple = training_args.resume_from_checkpoint elif last_checkpoint is not None: __SCREAMING_SNAKE_CASE : int = last_checkpoint __SCREAMING_SNAKE_CASE : Tuple = trainer.train(resume_from_checkpoint=snake_case ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: __SCREAMING_SNAKE_CASE : Union[str, Any] = trainer.evaluate() trainer.log_metrics('''eval''' , snake_case ) trainer.save_metrics('''eval''' , snake_case ) # Write model card and (optionally) push to hub __SCREAMING_SNAKE_CASE : Optional[Any] = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''masked-image-modeling''', '''dataset''': data_args.dataset_name, '''tags''': ['''masked-image-modeling'''], } if training_args.push_to_hub: trainer.push_to_hub(**snake_case ) else: trainer.create_model_card(**snake_case ) if __name__ == "__main__": main()
74
1
def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = [] if len(snake_case ) == 1: return [nums.copy()] for _ in range(len(snake_case ) ): __SCREAMING_SNAKE_CASE : Optional[int] = nums.pop(0 ) __SCREAMING_SNAKE_CASE : int = permute(snake_case ) for perm in permutations: perm.append(snake_case ) result.extend(snake_case ) nums.append(snake_case ) return result def a__ ( snake_case ): """simple docstring""" def backtrack(snake_case ): if start == len(snake_case ) - 1: output.append(nums[:] ) else: for i in range(snake_case , len(snake_case ) ): __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = nums[i], nums[start] backtrack(start + 1 ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = nums[i], nums[start] # backtrack __SCREAMING_SNAKE_CASE : Optional[Any] = [] backtrack(0 ) return output if __name__ == "__main__": import doctest # use res to print the data in permute2 function lowercase_ = permutea([1, 2, 3]) print(res) doctest.testmod()
74
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """facebook/data2vec-vision-base-ft""": ( """https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json""" ), } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''data2vec-vision''' def __init__( self : Optional[int] , _A : List[Any]=768 , _A : Any=12 , _A : str=12 , _A : Union[str, Any]=3072 , _A : Union[str, Any]="gelu" , _A : List[Any]=0.0 , _A : Dict=0.0 , _A : Dict=0.02 , _A : Any=1e-12 , _A : Optional[Any]=224 , _A : Union[str, Any]=16 , _A : Tuple=3 , _A : List[Any]=False , _A : List[str]=False , _A : Dict=False , _A : Dict=False , _A : Any=0.1 , _A : List[str]=0.1 , _A : Dict=True , _A : Dict=[3, 5, 7, 11] , _A : Union[str, Any]=[1, 2, 3, 6] , _A : Optional[Any]=True , _A : Any=0.4 , _A : List[str]=256 , _A : Any=1 , _A : Any=False , _A : Union[str, Any]=255 , **_A : Tuple , ): """simple docstring""" super().__init__(**_A ) __SCREAMING_SNAKE_CASE : Any = hidden_size __SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers __SCREAMING_SNAKE_CASE : Tuple = num_attention_heads __SCREAMING_SNAKE_CASE : List[Any] = intermediate_size __SCREAMING_SNAKE_CASE : Tuple = hidden_act __SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob __SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : List[Any] = initializer_range __SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps __SCREAMING_SNAKE_CASE : Any = image_size __SCREAMING_SNAKE_CASE : Optional[int] = patch_size __SCREAMING_SNAKE_CASE : Any = num_channels __SCREAMING_SNAKE_CASE : List[str] = use_mask_token __SCREAMING_SNAKE_CASE : List[Any] = use_absolute_position_embeddings __SCREAMING_SNAKE_CASE : Dict = use_relative_position_bias __SCREAMING_SNAKE_CASE : str = use_shared_relative_position_bias __SCREAMING_SNAKE_CASE : Union[str, Any] = layer_scale_init_value __SCREAMING_SNAKE_CASE : str = drop_path_rate __SCREAMING_SNAKE_CASE : Tuple = use_mean_pooling # decode head attributes (semantic segmentation) __SCREAMING_SNAKE_CASE : str = out_indices __SCREAMING_SNAKE_CASE : List[str] = pool_scales # auxiliary head attributes (semantic segmentation) __SCREAMING_SNAKE_CASE : Tuple = use_auxiliary_head __SCREAMING_SNAKE_CASE : Optional[Any] = auxiliary_loss_weight __SCREAMING_SNAKE_CASE : Union[str, Any] = auxiliary_channels __SCREAMING_SNAKE_CASE : List[Any] = auxiliary_num_convs __SCREAMING_SNAKE_CASE : Optional[Any] = auxiliary_concat_input __SCREAMING_SNAKE_CASE : Any = semantic_loss_ignore_index class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = version.parse('''1.11''' ) @property def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return 1e-4
74
1
from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = {} class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''llama''' lowerCAmelCase_ = ['''past_key_values'''] def __init__( self : Any , _A : Any=3_2000 , _A : Any=4096 , _A : Dict=1_1008 , _A : Dict=32 , _A : str=32 , _A : Any=None , _A : Tuple="silu" , _A : List[str]=2048 , _A : Optional[Any]=0.02 , _A : Tuple=1e-6 , _A : Any=True , _A : List[Any]=0 , _A : Optional[int]=1 , _A : Tuple=2 , _A : Dict=1 , _A : List[Any]=False , _A : Optional[Any]=None , **_A : Union[str, Any] , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size __SCREAMING_SNAKE_CASE : Dict = max_position_embeddings __SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size __SCREAMING_SNAKE_CASE : int = intermediate_size __SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers __SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads # for backward compatibility if num_key_value_heads is None: __SCREAMING_SNAKE_CASE : List[str] = num_attention_heads __SCREAMING_SNAKE_CASE : Union[str, Any] = num_key_value_heads __SCREAMING_SNAKE_CASE : Optional[int] = hidden_act __SCREAMING_SNAKE_CASE : List[str] = initializer_range __SCREAMING_SNAKE_CASE : Any = rms_norm_eps __SCREAMING_SNAKE_CASE : str = pretraining_tp __SCREAMING_SNAKE_CASE : List[str] = use_cache __SCREAMING_SNAKE_CASE : List[Any] = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , tie_word_embeddings=_A , **_A , ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" if self.rope_scaling is None: return if not isinstance(self.rope_scaling , _A ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ''' F'''got {self.rope_scaling}''' ) __SCREAMING_SNAKE_CASE : Any = self.rope_scaling.get('''type''' , _A ) __SCREAMING_SNAKE_CASE : Any = self.rope_scaling.get('''factor''' , _A ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(_A , _A ) or rope_scaling_factor <= 1.0: raise ValueError(F'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
74
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[str] , _A : Optional[int] , _A : Optional[Any]=13 , _A : List[Any]=7 , _A : List[str]=True , _A : Dict=True , _A : Tuple=False , _A : Union[str, Any]=True , _A : List[str]=99 , _A : Union[str, Any]=32 , _A : str=5 , _A : Union[str, Any]=4 , _A : int=37 , _A : int="gelu" , _A : Tuple=0.1 , _A : Dict=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : List[Any]=2 , _A : List[Any]=0.02 , _A : Any=3 , _A : Optional[int]=4 , _A : Optional[int]=None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = parent __SCREAMING_SNAKE_CASE : Optional[int] = batch_size __SCREAMING_SNAKE_CASE : str = seq_length __SCREAMING_SNAKE_CASE : int = is_training __SCREAMING_SNAKE_CASE : Union[str, Any] = use_input_mask __SCREAMING_SNAKE_CASE : str = use_token_type_ids __SCREAMING_SNAKE_CASE : Any = use_labels __SCREAMING_SNAKE_CASE : Any = vocab_size __SCREAMING_SNAKE_CASE : Optional[int] = hidden_size __SCREAMING_SNAKE_CASE : Any = num_hidden_layers __SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads __SCREAMING_SNAKE_CASE : List[str] = intermediate_size __SCREAMING_SNAKE_CASE : List[str] = hidden_act __SCREAMING_SNAKE_CASE : int = hidden_dropout_prob __SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings __SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size __SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size __SCREAMING_SNAKE_CASE : int = initializer_range __SCREAMING_SNAKE_CASE : List[Any] = num_labels __SCREAMING_SNAKE_CASE : List[Any] = num_choices __SCREAMING_SNAKE_CASE : Union[str, Any] = scope def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_input_mask: __SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE : Any = None __SCREAMING_SNAKE_CASE : Union[str, Any] = None __SCREAMING_SNAKE_CASE : int = None if self.use_labels: __SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE : Dict = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , 
hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : Optional[int] , _A : int , _A : Union[str, Any] , _A : List[str] , _A : Dict , _A : Dict , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , _A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Tuple , _A : Dict , _A : Tuple , _A : str , _A : Optional[int] , _A : List[str] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForMaskedLM(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Tuple = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Dict , _A : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any] , _A : Optional[Any] , _A : str , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForQuestionAnswering(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : int = model( _A , attention_mask=_A , start_positions=_A , end_positions=_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : Dict , _A : List[str] , _A : Tuple , _A : str , _A : Tuple , _A : Optional[int] , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels __SCREAMING_SNAKE_CASE : List[Any] = DistilBertForSequenceClassification(_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : List[str] , _A : int , _A : List[Any] , _A : Any , _A : Any , _A : str , _A : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForTokenClassification(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Dict , _A : Optional[int] , _A : int , _A : Optional[int] , _A : List[Any] , _A : int , _A : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.num_choices __SCREAMING_SNAKE_CASE : int = DistilBertForMultipleChoice(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE : Optional[Any] = model( _A , attention_mask=_A , labels=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs() ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), 
(__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : List[Any] = config_and_inputs __SCREAMING_SNAKE_CASE : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) lowerCAmelCase_ = ( { '''feature-extraction''': DistilBertModel, '''fill-mask''': DistilBertForMaskedLM, '''question-answering''': DistilBertForQuestionAnswering, '''text-classification''': DistilBertForSequenceClassification, '''token-classification''': DistilBertForTokenClassification, '''zero-shot''': DistilBertForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = True def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = DistilBertModelTester(self ) __SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=_A , dim=37 ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*_A ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*_A ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*_A ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*_A ) @slow def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @slow @require_torch_gpu def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == DistilBertForMultipleChoice: return __SCREAMING_SNAKE_CASE : Dict = True __SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(config=_A ) __SCREAMING_SNAKE_CASE : int = self._prepare_for_class(_A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = torch.jit.trace( _A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(_A , os.path.join(_A , '''traced_model.pt''' ) ) __SCREAMING_SNAKE_CASE : Optional[int] = torch.jit.load(os.path.join(_A , '''traced_model.pt''' ) , map_location=_A ) loaded(inputs_dict['''input_ids'''].to(_A ) , inputs_dict['''attention_mask'''].to(_A ) ) @require_torch class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A , attention_mask=_A )[0] __SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , _A ) __SCREAMING_SNAKE_CASE : Any = torch.tensor( [[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1e-4 ) )
74
1
from __future__ import annotations import math lowercase_ = """2020.9.26""" lowercase_ = """xcodz-dot, cclaus, dhruvmanila""" def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case ): """simple docstring""" if not all(isinstance(snake_case , (float, int) ) for val in locals().values() ): __SCREAMING_SNAKE_CASE : Optional[int] = F'''Input values must either be float or int: {list(locals().values() )}''' raise TypeError(snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = ((x * distance) / (z + distance)) * scale __SCREAMING_SNAKE_CASE : int = ((y * distance) / (z + distance)) * scale return projected_x, projected_y def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case ): """simple docstring""" if not isinstance(snake_case , snake_case ): raise TypeError('''Axis must be a str''' ) __SCREAMING_SNAKE_CASE : List[str] = locals() del input_variables["axis"] if not all(isinstance(snake_case , (float, int) ) for val in input_variables.values() ): __SCREAMING_SNAKE_CASE : List[Any] = ( '''Input values except axis must either be float or int: ''' F'''{list(input_variables.values() )}''' ) raise TypeError(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = (angle % 360) / 450 * 180 / math.pi if axis == "z": __SCREAMING_SNAKE_CASE : Tuple = x * math.cos(snake_case ) - y * math.sin(snake_case ) __SCREAMING_SNAKE_CASE : List[Any] = y * math.cos(snake_case ) + x * math.sin(snake_case ) __SCREAMING_SNAKE_CASE : Optional[int] = z elif axis == "x": __SCREAMING_SNAKE_CASE : List[str] = y * math.cos(snake_case ) - z * math.sin(snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = z * math.cos(snake_case ) + y * math.sin(snake_case ) __SCREAMING_SNAKE_CASE : Tuple = x elif axis == "y": __SCREAMING_SNAKE_CASE : List[str] = x * math.cos(snake_case ) - z * math.sin(snake_case ) __SCREAMING_SNAKE_CASE : Optional[Any] = z * math.cos(snake_case ) + x * math.sin(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = y else: raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''' ) return new_x, new_y, new_z if __name__ == "__main__": import doctest doctest.testmod() print(f'''{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }''') print(f'''{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }''')
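# A small self-contained sketch of the perspective projection implemented above,
# rewritten with readable names so the arithmetic is easy to check; `project_point`
# and its parameter names are illustrative choices, not identifiers from the file.
def project_point(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    # Points further away along z are pulled towards the origin before scaling.
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


print(project_point(1.0, 2.0, 3.0, 10.0, 10.0))  # approximately (7.69, 15.38)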
74
import logging import os import threading import time try: import warnings except ImportError: lowercase_ = None try: import msvcrt except ImportError: lowercase_ = None try: import fcntl except ImportError: lowercase_ = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: lowercase_ = OSError # Data # ------------------------------------------------ lowercase_ = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] lowercase_ = """3.0.12""" lowercase_ = None def a__ ( ): """simple docstring""" global _logger __SCREAMING_SNAKE_CASE : Optional[Any] = _logger or logging.getLogger(__name__ ) return _logger class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[Any] , _A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = lock_file return None def __str__( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = F'''The file lock \'{self.lock_file}\' could not be acquired.''' return temp class __UpperCamelCase : """simple docstring""" def __init__( self : Optional[Any] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = lock return None def __enter__( self : Any ): """simple docstring""" return self.lock def __exit__( self : str , _A : Any , _A : int , _A : Any ): """simple docstring""" self.lock.release() return None class __UpperCamelCase : """simple docstring""" def __init__( self : Any , _A : int , _A : Optional[int]=-1 , _A : List[Any]=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long __SCREAMING_SNAKE_CASE : Optional[Any] = self.hash_filename_if_too_long(_A , _A ) # The path to the lock file. __SCREAMING_SNAKE_CASE : Tuple = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __SCREAMING_SNAKE_CASE : str = None # The default timeout value. __SCREAMING_SNAKE_CASE : Any = timeout # We use this lock primarily for the lock counter. __SCREAMING_SNAKE_CASE : int = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __SCREAMING_SNAKE_CASE : int = 0 return None @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._lock_file @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._timeout @timeout.setter def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = float(_A ) return None def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" raise NotImplementedError() def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" raise NotImplementedError() @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._lock_file_fd is not None def UpperCAmelCase__ ( self : Tuple , _A : List[Any]=None , _A : Optional[Any]=0.05 ): """simple docstring""" if timeout is None: __SCREAMING_SNAKE_CASE : Optional[int] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __SCREAMING_SNAKE_CASE : Tuple = id(self ) __SCREAMING_SNAKE_CASE : Any = self._lock_file __SCREAMING_SNAKE_CASE : Union[str, Any] = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F'''Attempting to acquire lock {lock_id} on {lock_filename}''' ) self._acquire() if self.is_locked: logger().debug(F'''Lock {lock_id} acquired on {lock_filename}''' ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F'''Timeout on acquiring lock {lock_id} on {lock_filename}''' ) raise Timeout(self._lock_file ) else: logger().debug( F'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' ) time.sleep(_A ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __SCREAMING_SNAKE_CASE : Optional[Any] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def UpperCAmelCase__ ( self : int , _A : List[str]=False ): """simple docstring""" with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __SCREAMING_SNAKE_CASE : Optional[int] = id(self ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self._lock_file logger().debug(F'''Attempting to release lock {lock_id} on {lock_filename}''' ) self._release() __SCREAMING_SNAKE_CASE : int = 0 logger().debug(F'''Lock {lock_id} released on {lock_filename}''' ) return None def __enter__( self : int ): """simple docstring""" self.acquire() return self def __exit__( self : Optional[int] , _A : List[str] , _A : List[Any] , _A : int ): """simple docstring""" self.release() return None def __del__( self : int ): """simple docstring""" self.release(force=_A ) return None def UpperCAmelCase__ ( self : Optional[int] , _A : str , _A : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = os.path.basename(_A ) if len(_A ) > max_length and max_length > 0: __SCREAMING_SNAKE_CASE : Tuple = os.path.dirname(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = str(hash(_A ) ) __SCREAMING_SNAKE_CASE : Optional[int] = filename[: max_length - len(_A ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(_A , _A ) else: return path class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[Any] , _A : Optional[Any] , _A : List[Any]=-1 , _A : Dict=None ): """simple docstring""" from .file_utils import relative_to_absolute_path super().__init__(_A , timeout=_A , max_filename_length=_A ) __SCREAMING_SNAKE_CASE : str = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __SCREAMING_SNAKE_CASE : List[str] = os.open(self._lock_file , _A ) except OSError: pass else: try: msvcrt.locking(_A , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(_A ) else: __SCREAMING_SNAKE_CASE : str = fd return None def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self._lock_file_fd __SCREAMING_SNAKE_CASE : int = None msvcrt.locking(_A , msvcrt.LK_UNLCK , 1 ) os.close(_A ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Tuple , _A : Optional[int] , _A : Dict=-1 , _A : str=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = os.statvfs(os.path.dirname(_A ) ).f_namemax super().__init__(_A , timeout=_A , max_filename_length=_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = os.O_RDWR | os.O_CREAT | os.O_TRUNC __SCREAMING_SNAKE_CASE : int = os.open(self._lock_file , _A ) try: fcntl.flock(_A , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(_A ) else: __SCREAMING_SNAKE_CASE : int = fd return None def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self._lock_file_fd __SCREAMING_SNAKE_CASE : Any = None fcntl.flock(_A , fcntl.LOCK_UN ) os.close(_A ) return None class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __SCREAMING_SNAKE_CASE : Optional[Any] = os.open(self._lock_file , _A ) except OSError: pass else: __SCREAMING_SNAKE_CASE : List[str] = fd return None def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" os.close(self._lock_file_fd ) __SCREAMING_SNAKE_CASE : Optional[Any] = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None lowercase_ = None if msvcrt: lowercase_ = WindowsFileLock elif fcntl: lowercase_ = UnixFileLock else: lowercase_ = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
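# An illustrative usage sketch of the locking API defined above. In this module the
# platform-specific class ends up bound to the obfuscated name `lowercase_`, so the
# import below uses the upstream `filelock` package that this code mirrors; treat it
# as a standalone example rather than a direct import from the file.
from filelock import FileLock, Timeout

lock = FileLock("shared_resource.txt.lock", timeout=5)
try:
    with lock:  # acquire() on enter, release() on exit
        with open("shared_resource.txt", "a") as f:
            f.write("exclusive write\n")
except Timeout:
    print("another process is holding the lock")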
74
1
import torch from diffusers import StableDiffusionPipeline lowercase_ = """path-to-your-trained-model""" lowercase_ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""") lowercase_ = """A photo of sks dog in a bucket""" lowercase_ = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0] image.save("""dog-bucket.png""")
74
import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup lowercase_ = logging.get_logger(__name__) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Optional[Any] , **_A : Dict ): """simple docstring""" requires_backends(self , ['''bs4'''] ) super().__init__(**_A ) def UpperCAmelCase__ ( self : Optional[int] , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = [] __SCREAMING_SNAKE_CASE : Any = [] __SCREAMING_SNAKE_CASE : Union[str, Any] = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag __SCREAMING_SNAKE_CASE : Optional[int] = parent.find_all(child.name , recursive=_A ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(_A ) else next(i for i, s in enumerate(_A , 1 ) if s is child ) ) __SCREAMING_SNAKE_CASE : Any = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def UpperCAmelCase__ ( self : Dict , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BeautifulSoup(_A , '''html.parser''' ) __SCREAMING_SNAKE_CASE : str = [] __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : int = [] for element in html_code.descendants: if type(_A ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue __SCREAMING_SNAKE_CASE : List[Any] = html.unescape(_A ).strip() if not text_in_this_tag: continue all_doc_strings.append(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = self.xpath_soup(_A ) stringaxtag_seq.append(_A ) stringaxsubs_seq.append(_A ) if len(_A ) != len(_A ): raise ValueError('''Number of doc strings and xtags does not correspond''' ) if len(_A ) != len(_A ): raise ValueError('''Number of doc strings and xsubs does not correspond''' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def UpperCAmelCase__ ( self : int , _A : Tuple , _A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = '''''' for tagname, subs in zip(_A , _A ): xpath += F'''/{tagname}''' if subs != 0: xpath += F'''[{subs}]''' return xpath def __call__( self : Optional[int] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = False # Check that strings has a valid type if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Any = True elif isinstance(_A , (list, tuple) ): if len(_A ) == 0 or isinstance(html_strings[0] , _A ): __SCREAMING_SNAKE_CASE : List[Any] = True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' F'''but is of type {type(_A )}.''' ) __SCREAMING_SNAKE_CASE : Any = bool(isinstance(_A , (list, tuple) ) and (isinstance(html_strings[0] , _A )) ) if not is_batched: __SCREAMING_SNAKE_CASE : Dict = [html_strings] # Get nodes + xpaths __SCREAMING_SNAKE_CASE : str = [] __SCREAMING_SNAKE_CASE : Tuple = [] for html_string in html_strings: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_three_from_single(_A ) nodes.append(_A ) __SCREAMING_SNAKE_CASE : Dict = [] for node, tag_list, sub_list in zip(_A , _A , _A ): __SCREAMING_SNAKE_CASE : List[Any] = self.construct_xpath(_A , _A ) xpath_strings.append(_A ) xpaths.append(_A ) # return as Dict __SCREAMING_SNAKE_CASE : Optional[int] = {'''nodes''': nodes, '''xpaths''': xpaths} 
__SCREAMING_SNAKE_CASE : List[str] = BatchFeature(data=_A , tensor_type=_A ) return encoded_inputs
74
1
from pathlib import Path import fire from tqdm import tqdm def a__ ( snake_case="ro" , snake_case="en" , snake_case="wmt16" , snake_case=None ): """simple docstring""" try: import datasets except (ModuleNotFoundError, ImportError): raise ImportError('''run pip install datasets''' ) __SCREAMING_SNAKE_CASE : str = F'''{src_lang}-{tgt_lang}''' print(F'''Converting {dataset}-{pair}''' ) __SCREAMING_SNAKE_CASE : Dict = datasets.load_dataset(snake_case , snake_case ) if save_dir is None: __SCREAMING_SNAKE_CASE : List[str] = F'''{dataset}-{pair}''' __SCREAMING_SNAKE_CASE : Dict = Path(snake_case ) save_dir.mkdir(exist_ok=snake_case ) for split in ds.keys(): print(F'''Splitting {split} with {ds[split].num_rows} records''' ) # to save to val.source, val.target like summary datasets __SCREAMING_SNAKE_CASE : Optional[int] = '''val''' if split == '''validation''' else split __SCREAMING_SNAKE_CASE : Union[str, Any] = save_dir.joinpath(F'''{fn}.source''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = save_dir.joinpath(F'''{fn}.target''' ) __SCREAMING_SNAKE_CASE : Dict = src_path.open('''w+''' ) __SCREAMING_SNAKE_CASE : int = tgt_path.open('''w+''' ) # reader is the bottleneck so writing one record at a time doesn't slow things down for x in tqdm(ds[split] ): __SCREAMING_SNAKE_CASE : Tuple = x['''translation'''] src_fp.write(ex[src_lang] + '''\n''' ) tgt_fp.write(ex[tgt_lang] + '''\n''' ) print(F'''Saved {dataset} dataset to {save_dir}''' ) if __name__ == "__main__": fire.Fire(download_wmt_dataset)
74
import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger() def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case = True ): """simple docstring""" print(F'''Converting {name}...''' ) with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": __SCREAMING_SNAKE_CASE : Tuple = timm.create_model('''levit_128s''' , pretrained=snake_case ) else: __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_128''' , pretrained=snake_case ) if hidden_sizes == 192: __SCREAMING_SNAKE_CASE : Dict = timm.create_model('''levit_192''' , pretrained=snake_case ) if hidden_sizes == 256: __SCREAMING_SNAKE_CASE : Optional[int] = timm.create_model('''levit_256''' , pretrained=snake_case ) if hidden_sizes == 384: __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_384''' , pretrained=snake_case ) from_model.eval() __SCREAMING_SNAKE_CASE : str = LevitForImageClassificationWithTeacher(snake_case ).eval() __SCREAMING_SNAKE_CASE : int = OrderedDict() __SCREAMING_SNAKE_CASE : List[Any] = from_model.state_dict() __SCREAMING_SNAKE_CASE : Tuple = list(from_model.state_dict().keys() ) __SCREAMING_SNAKE_CASE : str = list(our_model.state_dict().keys() ) print(len(snake_case ) , len(snake_case ) ) for i in range(len(snake_case ) ): __SCREAMING_SNAKE_CASE : int = weights[og_keys[i]] our_model.load_state_dict(snake_case ) __SCREAMING_SNAKE_CASE : str = torch.randn((2, 3, 224, 224) ) __SCREAMING_SNAKE_CASE : Tuple = from_model(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = our_model(snake_case ).logits assert torch.allclose(snake_case , snake_case ), "The model logits don't match the original one." 
__SCREAMING_SNAKE_CASE : Union[str, Any] = name print(snake_case ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) __SCREAMING_SNAKE_CASE : Union[str, Any] = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F'''Pushed {checkpoint_name}''' ) def a__ ( snake_case , snake_case = None , snake_case = True ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = '''imagenet-1k-id2label.json''' __SCREAMING_SNAKE_CASE : int = 1_000 __SCREAMING_SNAKE_CASE : Optional[int] = (1, num_labels) __SCREAMING_SNAKE_CASE : Any = '''huggingface/label-files''' __SCREAMING_SNAKE_CASE : Optional[Any] = num_labels __SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='''dataset''' ) , '''r''' ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {int(snake_case ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : str = idalabel __SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : List[str] = partial(snake_case , num_labels=snake_case , idalabel=snake_case , labelaid=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = { '''levit-128S''': 128, '''levit-128''': 128, '''levit-192''': 192, '''levit-256''': 256, '''levit-384''': 384, } __SCREAMING_SNAKE_CASE : Optional[int] = { '''levit-128S''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-128''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-192''': ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-256''': ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-384''': ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , snake_case , names_to_config[model_name] , snake_case , snake_case ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , snake_case , snake_case , snake_case , snake_case ) return config, expected_shape if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""levit-dump-folder/""", type=Path, required=False, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) lowercase_ = parser.parse_args() lowercase_ = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
74
1
import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class __UpperCamelCase : """simple docstring""" def __init__( self : List[str] , _A : Dict , _A : Tuple=13 , _A : Any=7 , _A : Optional[int]=True , _A : str=True , _A : List[str]=True , _A : int=True , _A : Tuple=99 , _A : List[Any]=32 , _A : List[str]=5 , _A : Union[str, Any]=4 , _A : Dict=4 , _A : Tuple="gelu" , _A : Any=0.0 , _A : List[Any]=0.1 , _A : Any=True , _A : Optional[int]=512 , _A : Optional[int]=16 , _A : Optional[int]=2 , _A : List[str]=0.02 , _A : Tuple=3 , _A : Tuple=4 , _A : Optional[Any]=None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = parent __SCREAMING_SNAKE_CASE : Dict = batch_size __SCREAMING_SNAKE_CASE : str = seq_length __SCREAMING_SNAKE_CASE : List[str] = is_training __SCREAMING_SNAKE_CASE : int = use_input_mask __SCREAMING_SNAKE_CASE : List[str] = use_token_type_ids __SCREAMING_SNAKE_CASE : Optional[int] = use_labels __SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size __SCREAMING_SNAKE_CASE : Dict = hidden_size __SCREAMING_SNAKE_CASE : int = num_hidden_layers __SCREAMING_SNAKE_CASE : Any = num_attention_heads __SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_multiple_size __SCREAMING_SNAKE_CASE : Tuple = hidden_act __SCREAMING_SNAKE_CASE : int = hidden_dropout __SCREAMING_SNAKE_CASE : str = attention_dropout __SCREAMING_SNAKE_CASE : List[str] = weight_tying __SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings __SCREAMING_SNAKE_CASE : Optional[int] = type_vocab_size __SCREAMING_SNAKE_CASE : Optional[int] = type_sequence_label_size __SCREAMING_SNAKE_CASE : List[Any] = initializer_range __SCREAMING_SNAKE_CASE : List[str] = num_labels __SCREAMING_SNAKE_CASE : Union[str, Any] = num_choices __SCREAMING_SNAKE_CASE : Optional[int] = scope def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE : Optional[int] = None if self.use_input_mask: __SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE : List[str] = None if self.use_labels: __SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE : Optional[int] = self.get_config() return config, input_ids, input_mask, token_labels def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : 
Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE : List[str] = True return config, input_ids, input_mask, token_labels def UpperCAmelCase__ ( self : Optional[int] , _A : List[str] , _A : str , _A : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = GPTNeoXJapaneseModel(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : str = model(_A , attention_mask=_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = True __SCREAMING_SNAKE_CASE : List[Any] = GPTNeoXJapaneseModel(_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : int = model(_A , attention_mask=_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Dict , _A : Optional[Any] , _A : Union[str, Any] , _A : Any , _A : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = GPTNeoXJapaneseForCausalLM(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : List[str] , _A : Union[str, Any] , _A : int , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = True __SCREAMING_SNAKE_CASE : Any = GPTNeoXJapaneseForCausalLM(config=_A ) model.to(_A ) model.eval() # first forward pass __SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A , attention_mask=_A , use_cache=_A ) __SCREAMING_SNAKE_CASE : Any = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) __SCREAMING_SNAKE_CASE : List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __SCREAMING_SNAKE_CASE : int = torch.cat([input_ids, next_tokens] , dim=-1 ) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([input_mask, next_mask] , dim=-1 ) __SCREAMING_SNAKE_CASE : int = model(_A , attention_mask=_A , output_hidden_states=_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = output_from_no_past['''hidden_states'''][0] __SCREAMING_SNAKE_CASE : Tuple = model( _A , attention_mask=_A , past_key_values=_A , output_hidden_states=_A , )['''hidden_states'''][0] # select random slice __SCREAMING_SNAKE_CASE : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item() __SCREAMING_SNAKE_CASE : Dict = output_from_no_past[:, -3:, random_slice_idx].detach() __SCREAMING_SNAKE_CASE : List[Any] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_A , _A , atol=1e-3 ) ) def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = config_and_inputs __SCREAMING_SNAKE_CASE : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask} 
return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () lowerCAmelCase_ = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () lowerCAmelCase_ = ( {'''feature-extraction''': GPTNeoXJapaneseModel, '''text-generation''': GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = GPTNeoXJapaneseModelTester(self ) __SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=_A , hidden_size=37 ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(_A , _A , _A ) def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(_A , _A , _A ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_decoder() __SCREAMING_SNAKE_CASE : Tuple = None self.model_tester.create_and_check_model_as_decoder(_A , _A , _A ) def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(_A , _A , _A ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*_A ) @slow def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = '''abeja/gpt-neox-japanese-2.7b''' __SCREAMING_SNAKE_CASE : List[Any] = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、'''] __SCREAMING_SNAKE_CASE : str = [ '''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''', '''100年後に必要とされる会社は、「人」が中心の会社です。''', '''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''', '''国境の長いトンネルを抜けると、そこは雪国だった。''', '''美味しい日本食といえば、やっぱりお寿司ですよね。''', ] __SCREAMING_SNAKE_CASE : List[str] = GPTNeoXJapaneseTokenizer.from_pretrained(_A ) __SCREAMING_SNAKE_CASE : int = GPTNeoXJapaneseForCausalLM.from_pretrained(_A ) __SCREAMING_SNAKE_CASE : Any = [] for prompt in prompts: __SCREAMING_SNAKE_CASE : str = tokenizer(_A , return_tensors='''pt''' ).input_ids __SCREAMING_SNAKE_CASE : Any = model.generate(_A , max_length=50 ) __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.batch_decode(_A , skip_special_tokens=_A ) predicted_outputs += generated_string self.assertListEqual(_A , _A )
74
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowercase_ = { """configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""", """FalconForCausalLM""", """FalconModel""", """FalconPreTrainedModel""", """FalconForSequenceClassification""", """FalconForTokenClassification""", """FalconForQuestionAnswering""", ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
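# A rough standalone sketch of the lazy-import idea behind `_LazyModule` above
# (simplified; not the transformers implementation). Submodules are imported only
# when one of their exported names is first accessed.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict[str, list[str]]):
        super().__init__(name)
        self._import_structure = import_structure
        # Flattened map: exported name -> submodule that defines it.
        self._name_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._name_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value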
74
1
import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version(""">=""", FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType lowercase_ = get_logger(__name__) def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case=0 ): """simple docstring""" os.makedirs(snake_case , exist_ok=snake_case ) with FSDP.state_dict_type( snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __SCREAMING_SNAKE_CASE : List[str] = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __SCREAMING_SNAKE_CASE : Optional[int] = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin''' __SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(snake_case , snake_case ) if accelerator.process_index == 0: logger.info(F'''Saving model to {output_model_file}''' ) torch.save(snake_case , snake_case ) logger.info(F'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: __SCREAMING_SNAKE_CASE : List[Any] = ( F'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) __SCREAMING_SNAKE_CASE : str = os.path.join(snake_case , snake_case ) logger.info(F'''Saving model to {output_model_file}''' ) torch.save(snake_case , snake_case ) logger.info(F'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(snake_case , F'''{MODEL_NAME}_{model_index}''' ) os.makedirs(snake_case , exist_ok=snake_case ) logger.info(F'''Saving model to {ckpt_dir}''' ) __SCREAMING_SNAKE_CASE : Optional[int] = {'''model''': state_dict} dist_cp.save_state_dict( state_dict=snake_case , storage_writer=dist_cp.FileSystemWriter(snake_case ) , planner=DefaultSavePlanner() , ) logger.info(F'''Model saved to {ckpt_dir}''' ) def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case=0 ): """simple docstring""" accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(snake_case ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( '''Set the `sync_module_states` flag to `True` so that model states are synced across processes when ''' '''initializing FSDP object''' ) return __SCREAMING_SNAKE_CASE : Optional[int] = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin''' __SCREAMING_SNAKE_CASE : int = os.path.join(snake_case , snake_case ) logger.info(F'''Loading model from {input_model_file}''' ) __SCREAMING_SNAKE_CASE : str = torch.load(snake_case ) logger.info(F'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: __SCREAMING_SNAKE_CASE : Optional[int] = ( 
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) __SCREAMING_SNAKE_CASE : Tuple = os.path.join(snake_case , snake_case ) logger.info(F'''Loading model from {input_model_file}''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.load(snake_case ) logger.info(F'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __SCREAMING_SNAKE_CASE : Union[str, Any] = ( os.path.join(snake_case , F'''{MODEL_NAME}_{model_index}''' ) if F'''{MODEL_NAME}''' not in input_dir else input_dir ) logger.info(F'''Loading model from {ckpt_dir}''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {'''model''': model.state_dict()} dist_cp.load_state_dict( state_dict=snake_case , storage_reader=dist_cp.FileSystemReader(snake_case ) , planner=DefaultLoadPlanner() , ) __SCREAMING_SNAKE_CASE : Optional[int] = state_dict['''model'''] logger.info(F'''Model loaded from {ckpt_dir}''' ) model.load_state_dict(snake_case ) def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case=0 ): """simple docstring""" os.makedirs(snake_case , exist_ok=snake_case ) with FSDP.state_dict_type( snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __SCREAMING_SNAKE_CASE : Optional[Any] = FSDP.optim_state_dict(snake_case , snake_case ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: __SCREAMING_SNAKE_CASE : Dict = ( F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(snake_case , snake_case ) logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' ) torch.save(snake_case , snake_case ) logger.info(F'''Optimizer state saved in {output_optimizer_file}''' ) else: __SCREAMING_SNAKE_CASE : List[str] = os.path.join(snake_case , F'''{OPTIMIZER_NAME}_{optimizer_index}''' ) os.makedirs(snake_case , exist_ok=snake_case ) logger.info(F'''Saving Optimizer state to {ckpt_dir}''' ) dist_cp.save_state_dict( state_dict={'''optimizer''': optim_state} , storage_writer=dist_cp.FileSystemWriter(snake_case ) , planner=DefaultSavePlanner() , ) logger.info(F'''Optimizer state saved in {ckpt_dir}''' ) def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case=0 ): """simple docstring""" accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __SCREAMING_SNAKE_CASE : Tuple = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: __SCREAMING_SNAKE_CASE : int = ( F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) __SCREAMING_SNAKE_CASE : Any = os.path.join(snake_case , snake_case ) logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(snake_case ) logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' ) else: __SCREAMING_SNAKE_CASE : List[Any] = ( os.path.join(snake_case , F'''{OPTIMIZER_NAME}_{optimizer_index}''' ) if 
F'''{OPTIMIZER_NAME}''' not in input_dir else input_dir ) logger.info(F'''Loading Optimizer from {ckpt_dir}''' ) __SCREAMING_SNAKE_CASE : str = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='''optimizer''' , storage_reader=dist_cp.FileSystemReader(snake_case ) , ) __SCREAMING_SNAKE_CASE : Tuple = optim_state['''optimizer'''] logger.info(F'''Optimizer loaded from {ckpt_dir}''' ) __SCREAMING_SNAKE_CASE : List[str] = FSDP.optim_state_dict_to_load(snake_case , snake_case , snake_case ) optimizer.load_state_dict(snake_case )
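# A standalone sketch of the FULL_STATE_DICT save path used above (illustrative,
# not the accelerate implementation): gather a full, CPU-offloaded state dict and
# write it from rank 0 only. The function and file names here are example choices.
import os

import torch
from torch.distributed.fsdp import FullStateDictConfig, StateDictType
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP


def save_full_state_dict(model: FSDP, output_dir: str, rank: int) -> None:
    config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
    with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, config):
        state_dict = model.state_dict()
    if rank == 0:
        os.makedirs(output_dir, exist_ok=True)
        torch.save(state_dict, os.path.join(output_dir, "pytorch_model.bin"))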
74
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging lowercase_ = logging.get_logger(__name__) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = set() __SCREAMING_SNAKE_CASE : str = [] def parse_line(snake_case ): for line in fp: if isinstance(snake_case , snake_case ): __SCREAMING_SNAKE_CASE : List[Any] = line.decode('''UTF-8''' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(''' ''' ): # process a single warning and move it to `selected_warnings`. if len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : List[Any] = '''\n'''.join(snake_case ) # Only keep the warnings specified in `targets` if any(F''': {x}: ''' in warning for x in targets ): selected_warnings.add(snake_case ) buffer.clear() continue else: __SCREAMING_SNAKE_CASE : int = line.strip() buffer.append(snake_case ) if from_gh: for filename in os.listdir(snake_case ): __SCREAMING_SNAKE_CASE : Any = os.path.join(snake_case , snake_case ) if not os.path.isdir(snake_case ): # read the file if filename != "warnings.txt": continue with open(snake_case ) as fp: parse_line(snake_case ) else: try: with zipfile.ZipFile(snake_case ) as z: for filename in z.namelist(): if not os.path.isdir(snake_case ): # read the file if filename != "warnings.txt": continue with z.open(snake_case ) as fp: parse_line(snake_case ) except Exception: logger.warning( F'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' ) return selected_warnings def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = set() __SCREAMING_SNAKE_CASE : List[Any] = [os.path.join(snake_case , snake_case ) for p in os.listdir(snake_case ) if (p.endswith('''.zip''' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(snake_case , snake_case ) ) return selected_warnings if __name__ == "__main__": def a__ ( snake_case ): """simple docstring""" return values.split(''',''' ) lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") # optional parameters parser.add_argument( """--targets""", default="""DeprecationWarning,UserWarning,FutureWarning""", type=list_str, help="""Comma-separated list of target warning(s) which we want to extract.""", ) parser.add_argument( """--from_gh""", action="""store_true""", help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""", ) lowercase_ = parser.parse_args() lowercase_ = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links lowercase_ = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("""=""" * 
80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts lowercase_ = extract_warnings(args.output_dir, args.targets) lowercase_ = sorted(selected_warnings) with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
74
1
import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType lowercase_ , lowercase_ , lowercase_ = False, False, False @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = None lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = None # Automatically constructed lowerCAmelCase_ = "dict" lowerCAmelCase_ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) lowerCAmelCase_ = field(default='''Audio''' , init=lowerCAmelCase__ , repr=lowerCAmelCase__ ) def __call__( self : int ): """simple docstring""" return self.pa_type def UpperCAmelCase__ ( self : str , _A : Union[str, bytes, dict] ): """simple docstring""" try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err if isinstance(_A , _A ): return {"bytes": None, "path": value} elif isinstance(_A , _A ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes __SCREAMING_SNAKE_CASE : List[Any] = BytesIO() sf.write(_A , value['''array'''] , value['''sampling_rate'''] , format='''wav''' ) return {"bytes": buffer.getvalue(), "path": None} elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith('''pcm''' ): # "PCM" only has raw audio bytes if value.get('''sampling_rate''' ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' ) if value.get('''bytes''' ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) __SCREAMING_SNAKE_CASE : Optional[Any] = np.frombuffer(value['''bytes'''] , dtype=np.intaa ).astype(np.floataa ) / 3_2767 else: __SCREAMING_SNAKE_CASE : Optional[int] = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.floataa ) / 3_2767 __SCREAMING_SNAKE_CASE : str = BytesIO(bytes() ) sf.write(_A , _A , value['''sampling_rate'''] , format='''wav''' ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get('''path''' )} elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )} else: raise ValueError( F'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def UpperCAmelCase__ ( self : List[Any] , _A : dict , _A : Optional[Dict[str, Union[str, bool, None]]] = None ): """simple docstring""" if not self.decode: raise RuntimeError('''Decoding is disabled for this feature. 
Please use Audio(decode=True) instead.''' ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None) if path is None and file is None: raise ValueError(F'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err __SCREAMING_SNAKE_CASE : List[Any] = xsplitext(_A )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( '''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, ''' '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( '''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, ''' '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' ) if file is None: __SCREAMING_SNAKE_CASE : str = token_per_repo_id or {} __SCREAMING_SNAKE_CASE : int = path.split('''::''' )[-1] try: __SCREAMING_SNAKE_CASE : int = string_to_dict(_A , config.HUB_DATASETS_URL )['''repo_id'''] __SCREAMING_SNAKE_CASE : Union[str, Any] = token_per_repo_id[repo_id] except (ValueError, KeyError): __SCREAMING_SNAKE_CASE : str = None with xopen(_A , '''rb''' , use_auth_token=_A ) as f: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = sf.read(_A ) else: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = sf.read(_A ) __SCREAMING_SNAKE_CASE : Tuple = array.T if self.mono: __SCREAMING_SNAKE_CASE : List[Any] = librosa.to_mono(_A ) if self.sampling_rate and self.sampling_rate != sampling_rate: __SCREAMING_SNAKE_CASE : Any = librosa.resample(_A , orig_sr=_A , target_sr=self.sampling_rate ) __SCREAMING_SNAKE_CASE : List[Any] = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def UpperCAmelCase__ ( self : Any ): """simple docstring""" from .features import Value if self.decode: raise ValueError('''Cannot flatten a decoded Audio feature.''' ) return { "bytes": Value('''binary''' ), "path": Value('''string''' ), } def UpperCAmelCase__ ( self : Any , _A : Union[pa.StringArray, pa.StructArray] ): """simple docstring""" if pa.types.is_string(storage.type ): __SCREAMING_SNAKE_CASE : str = pa.array([None] * len(_A ) , type=pa.binary() ) __SCREAMING_SNAKE_CASE : int = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): __SCREAMING_SNAKE_CASE : Dict = pa.array([None] * len(_A ) , type=pa.string() ) __SCREAMING_SNAKE_CASE : List[Any] = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ): __SCREAMING_SNAKE_CASE : Optional[Any] = pa.array([Audio().encode_example(_A ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index('''bytes''' ) >= 0: __SCREAMING_SNAKE_CASE : List[str] = storage.field('''bytes''' ) else: __SCREAMING_SNAKE_CASE : Optional[int] = pa.array([None] * len(_A ) , type=pa.binary() ) if storage.type.get_field_index('''path''' ) >= 0: __SCREAMING_SNAKE_CASE : Tuple = 
storage.field('''path''' ) else: __SCREAMING_SNAKE_CASE : int = pa.array([None] * len(_A ) , type=pa.string() ) __SCREAMING_SNAKE_CASE : List[str] = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) return array_cast(_A , self.pa_type ) def UpperCAmelCase__ ( self : Union[str, Any] , _A : pa.StructArray ): """simple docstring""" @no_op_if_value_is_null def path_to_bytes(_A : Tuple ): with xopen(_A , '''rb''' ) as f: __SCREAMING_SNAKE_CASE : List[Any] = f.read() return bytes_ __SCREAMING_SNAKE_CASE : Optional[Any] = pa.array( [ (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) __SCREAMING_SNAKE_CASE : Tuple = pa.array( [os.path.basename(_A ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , ) __SCREAMING_SNAKE_CASE : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() ) return array_cast(_A , self.pa_type )
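# A minimal standalone sketch of the "array -> wav bytes" branch of encode_example
# above, assuming numpy and soundfile are installed; the silent one-second clip is
# just example data.
from io import BytesIO

import numpy as np
import soundfile as sf

sample = {"array": np.zeros(16_000, dtype=np.float32), "sampling_rate": 16_000}
buffer = BytesIO()
sf.write(buffer, sample["array"], sample["sampling_rate"], format="wav")
encoded = {"bytes": buffer.getvalue(), "path": None}
print(len(encoded["bytes"]))  # size of the encoded wav payload in bytes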
74
from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = 42 class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" @register_to_config def __init__( self : Dict , _A : int = 16 , _A : int = 88 , _A : Optional[int] = None , _A : Optional[int] = None , _A : int = 1 , _A : float = 0.0 , _A : int = 32 , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : str = "geglu" , _A : bool = True , _A : bool = True , ): """simple docstring""" super().__init__() __SCREAMING_SNAKE_CASE : Dict = num_attention_heads __SCREAMING_SNAKE_CASE : Optional[int] = attention_head_dim __SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads * attention_head_dim __SCREAMING_SNAKE_CASE : Tuple = in_channels __SCREAMING_SNAKE_CASE : str = torch.nn.GroupNorm(num_groups=_A , num_channels=_A , eps=1e-6 , affine=_A ) __SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(_A , _A ) # 3. Define transformers blocks __SCREAMING_SNAKE_CASE : List[Any] = nn.ModuleList( [ BasicTransformerBlock( _A , _A , _A , dropout=_A , cross_attention_dim=_A , activation_fn=_A , attention_bias=_A , double_self_attention=_A , norm_elementwise_affine=_A , ) for d in range(_A ) ] ) __SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(_A , _A ) def UpperCAmelCase__ ( self : str , _A : Dict , _A : int=None , _A : Tuple=None , _A : Dict=None , _A : List[Any]=1 , _A : Union[str, Any]=None , _A : bool = True , ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = hidden_states.shape __SCREAMING_SNAKE_CASE : Any = batch_frames // num_frames __SCREAMING_SNAKE_CASE : Dict = hidden_states __SCREAMING_SNAKE_CASE : str = hidden_states[None, :].reshape(_A , _A , _A , _A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = hidden_states.permute(0 , 2 , 1 , 3 , 4 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self.norm(_A ) __SCREAMING_SNAKE_CASE : List[str] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , _A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = self.proj_in(_A ) # 2. Blocks for block in self.transformer_blocks: __SCREAMING_SNAKE_CASE : Optional[Any] = block( _A , encoder_hidden_states=_A , timestep=_A , cross_attention_kwargs=_A , class_labels=_A , ) # 3. Output __SCREAMING_SNAKE_CASE : Any = self.proj_out(_A ) __SCREAMING_SNAKE_CASE : List[str] = ( hidden_states[None, None, :] .reshape(_A , _A , _A , _A , _A ) .permute(0 , 3 , 4 , 1 , 2 ) .contiguous() ) __SCREAMING_SNAKE_CASE : Optional[Any] = hidden_states.reshape(_A , _A , _A , _A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=_A )
74
1
from __future__ import annotations def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" if (voltage, current, resistance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if resistance < 0: raise ValueError('''Resistance cannot be negative''' ) if voltage == 0: return {"voltage": float(current * resistance )} elif current == 0: return {"current": voltage / resistance} elif resistance == 0: return {"resistance": voltage / current} else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
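# A self-contained sketch of the same Ohm's-law helper with readable names
# (`ohms_law` and its keyword arguments are illustrative, not taken from the file):
# exactly one of the three quantities is passed as 0 and solved from V = I * R.
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    if current == 0:
        return {"current": voltage / resistance}
    return {"resistance": voltage / current}


print(ohms_law(voltage=10.0, current=5.0, resistance=0))  # {'resistance': 2.0}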
74
import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py lowercase_ = """src/diffusers""" lowercase_ = """.""" # This is to make sure the diffusers module imported is the one in the repo. lowercase_ = importlib.util.spec_from_file_location( """diffusers""", os.path.join(DIFFUSERS_PATH, """__init__.py"""), submodule_search_locations=[DIFFUSERS_PATH], ) lowercase_ = spec.loader.load_module() def a__ ( snake_case , snake_case ): """simple docstring""" return line.startswith(snake_case ) or len(snake_case ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , snake_case ) is not None def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = object_name.split('''.''' ) __SCREAMING_SNAKE_CASE : str = 0 # First let's find the module where our object lives. __SCREAMING_SNAKE_CASE : Any = parts[i] while i < len(snake_case ) and not os.path.isfile(os.path.join(snake_case , F'''{module}.py''' ) ): i += 1 if i < len(snake_case ): __SCREAMING_SNAKE_CASE : str = os.path.join(snake_case , parts[i] ) if i >= len(snake_case ): raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' ) with open(os.path.join(snake_case , F'''{module}.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : Dict = f.readlines() # Now let's find the class / func in the code! __SCREAMING_SNAKE_CASE : Union[str, Any] = '''''' __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for name in parts[i + 1 :]: while ( line_index < len(snake_case ) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(snake_case ): raise ValueError(F''' {object_name} does not match any function or class in {module}.''' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). __SCREAMING_SNAKE_CASE : List[Any] = line_index while line_index < len(snake_case ) and _should_continue(lines[line_index] , snake_case ): line_index += 1 # Clean up empty lines at the end (if any). 
while len(lines[line_index - 1] ) <= 1: line_index -= 1 __SCREAMING_SNAKE_CASE : Dict = lines[start_index:line_index] return "".join(snake_case ) lowercase_ = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""") lowercase_ = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""") lowercase_ = re.compile(R"""<FILL\s+[^>]*>""") def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = code.split('''\n''' ) __SCREAMING_SNAKE_CASE : Dict = 0 while idx < len(snake_case ) and len(lines[idx] ) == 0: idx += 1 if idx < len(snake_case ): return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0] return "" def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = len(get_indent(snake_case ) ) > 0 if has_indent: __SCREAMING_SNAKE_CASE : List[Any] = F'''class Bla:\n{code}''' __SCREAMING_SNAKE_CASE : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=snake_case ) __SCREAMING_SNAKE_CASE : Optional[int] = black.format_str(snake_case , mode=snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = style_docstrings_in_code(snake_case ) return result[len('''class Bla:\n''' ) :] if has_indent else result def a__ ( snake_case , snake_case=False ): """simple docstring""" with open(snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : List[str] = f.readlines() __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : int = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(snake_case ): __SCREAMING_SNAKE_CASE : Dict = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = search.groups() __SCREAMING_SNAKE_CASE : int = find_code_in_diffusers(snake_case ) __SCREAMING_SNAKE_CASE : str = get_indent(snake_case ) __SCREAMING_SNAKE_CASE : Any = line_index + 1 if indent == theoretical_indent else line_index + 2 __SCREAMING_SNAKE_CASE : Dict = theoretical_indent __SCREAMING_SNAKE_CASE : Optional[int] = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. __SCREAMING_SNAKE_CASE : List[Any] = True while line_index < len(snake_case ) and should_continue: line_index += 1 if line_index >= len(snake_case ): break __SCREAMING_SNAKE_CASE : Any = lines[line_index] __SCREAMING_SNAKE_CASE : Optional[Any] = _should_continue(snake_case , snake_case ) and re.search(F'''^{indent}# End copy''' , snake_case ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 __SCREAMING_SNAKE_CASE : List[str] = lines[start_index:line_index] __SCREAMING_SNAKE_CASE : Dict = ''''''.join(snake_case ) # Remove any nested `Copied from` comments to avoid circular copies __SCREAMING_SNAKE_CASE : Tuple = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(snake_case ) is None] __SCREAMING_SNAKE_CASE : Union[str, Any] = '''\n'''.join(snake_case ) # Before comparing, use the `replace_pattern` on the original code. 
if len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : Union[str, Any] = replace_pattern.replace('''with''' , '''''' ).split(''',''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = [_re_replace_pattern.search(snake_case ) for p in patterns] for pattern in patterns: if pattern is None: continue __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = pattern.groups() __SCREAMING_SNAKE_CASE : str = re.sub(snake_case , snake_case , snake_case ) if option.strip() == "all-casing": __SCREAMING_SNAKE_CASE : Optional[Any] = re.sub(obja.lower() , obja.lower() , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = re.sub(obja.upper() , obja.upper() , snake_case ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line __SCREAMING_SNAKE_CASE : Optional[Any] = blackify(lines[start_index - 1] + theoretical_code ) __SCREAMING_SNAKE_CASE : int = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: __SCREAMING_SNAKE_CASE : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:] __SCREAMING_SNAKE_CASE : str = start_index + 1 if overwrite and len(snake_case ) > 0: # Warn the user a file has been modified. print(F'''Detected changes, rewriting {filename}.''' ) with open(snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(snake_case ) return diffs def a__ ( snake_case = False ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = glob.glob(os.path.join(snake_case , '''**/*.py''' ) , recursive=snake_case ) __SCREAMING_SNAKE_CASE : Tuple = [] for filename in all_files: __SCREAMING_SNAKE_CASE : int = is_copy_consistent(snake_case , snake_case ) diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs] if not overwrite and len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : Optional[int] = '''\n'''.join(snake_case ) raise Exception( '''Found the following copy inconsistencies:\n''' + diff + '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") lowercase_ = parser.parse_args() check_copies(args.fix_and_overwrite)
74
1
from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL lowercase_ = logging.get_logger(__name__) def a__ ( snake_case ): """simple docstring""" if isinstance(snake_case , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(snake_case , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(snake_case ): return [[videos]] raise ValueError(F'''Could not make batched video from {videos}''' ) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''pixel_values'''] def __init__( self : Optional[Any] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Optional[int] , ): """simple docstring""" super().__init__(**_A ) __SCREAMING_SNAKE_CASE : int = size if size is not None else {'''shortest_edge''': 256} __SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(_A , default_to_square=_A ) __SCREAMING_SNAKE_CASE : List[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} __SCREAMING_SNAKE_CASE : int = get_size_dict(_A , param_name='''crop_size''' ) __SCREAMING_SNAKE_CASE : List[Any] = do_resize __SCREAMING_SNAKE_CASE : Union[str, Any] = size __SCREAMING_SNAKE_CASE : Optional[Any] = do_center_crop __SCREAMING_SNAKE_CASE : Union[str, Any] = crop_size __SCREAMING_SNAKE_CASE : Any = resample __SCREAMING_SNAKE_CASE : Tuple = do_rescale __SCREAMING_SNAKE_CASE : Optional[Any] = rescale_factor __SCREAMING_SNAKE_CASE : List[Any] = offset __SCREAMING_SNAKE_CASE : Any = do_normalize __SCREAMING_SNAKE_CASE : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __SCREAMING_SNAKE_CASE : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCAmelCase__ ( self : int , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(_A , default_to_square=_A ) if "shortest_edge" in size: __SCREAMING_SNAKE_CASE : Optional[Any] = get_resize_output_image_size(_A , size['''shortest_edge'''] , default_to_square=_A ) elif "height" in size and "width" in size: __SCREAMING_SNAKE_CASE : Dict = (size['''height'''], size['''width''']) else: raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}''' ) return resize(_A , size=_A , resample=_A , data_format=_A , **_A ) def UpperCAmelCase__ ( self : Union[str, Any] , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[Any] , ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' ) return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A ) def UpperCAmelCase__ ( self : List[Any] , _A : np.ndarray , _A : Union[int, float] , _A : bool = True , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Union[str, Any] , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = image.astype(np.floataa ) if offset: __SCREAMING_SNAKE_CASE : Union[str, Any] = image - (scale / 2) return rescale(_A , scale=_A , data_format=_A , **_A ) def UpperCAmelCase__ ( self : Dict , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ): """simple docstring""" return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def UpperCAmelCase__ ( self : str , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[ChannelDimension] = ChannelDimension.FIRST , ): """simple docstring""" if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) if offset and not do_rescale: raise ValueError('''For offset, do_rescale must also be set to True.''' ) # All transformations expect numpy arrays. 
__SCREAMING_SNAKE_CASE : Any = to_numpy_array(_A ) if do_resize: __SCREAMING_SNAKE_CASE : int = self.resize(image=_A , size=_A , resample=_A ) if do_center_crop: __SCREAMING_SNAKE_CASE : int = self.center_crop(_A , size=_A ) if do_rescale: __SCREAMING_SNAKE_CASE : Union[str, Any] = self.rescale(image=_A , scale=_A , offset=_A ) if do_normalize: __SCREAMING_SNAKE_CASE : Any = self.normalize(image=_A , mean=_A , std=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = to_channel_dimension_format(_A , _A ) return image def UpperCAmelCase__ ( self : Dict , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Optional[int] , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = do_resize if do_resize is not None else self.do_resize __SCREAMING_SNAKE_CASE : Dict = resample if resample is not None else self.resample __SCREAMING_SNAKE_CASE : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop __SCREAMING_SNAKE_CASE : str = do_rescale if do_rescale is not None else self.do_rescale __SCREAMING_SNAKE_CASE : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor __SCREAMING_SNAKE_CASE : str = offset if offset is not None else self.offset __SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize __SCREAMING_SNAKE_CASE : List[Any] = image_mean if image_mean is not None else self.image_mean __SCREAMING_SNAKE_CASE : Optional[Any] = image_std if image_std is not None else self.image_std __SCREAMING_SNAKE_CASE : List[str] = size if size is not None else self.size __SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(_A , default_to_square=_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = crop_size if crop_size is not None else self.crop_size __SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(_A , param_name='''crop_size''' ) if not valid_images(_A ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) __SCREAMING_SNAKE_CASE : Tuple = make_batched(_A ) __SCREAMING_SNAKE_CASE : Tuple = [ [ self._preprocess_image( image=_A , do_resize=_A , size=_A , resample=_A , do_center_crop=_A , crop_size=_A , do_rescale=_A , rescale_factor=_A , offset=_A , do_normalize=_A , image_mean=_A , image_std=_A , data_format=_A , ) for img in video ] for video in videos ] __SCREAMING_SNAKE_CASE : int = {'''pixel_values''': videos} return BatchFeature(data=_A , tensor_type=_A )
74
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" super().tearDown() gc.collect() def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained( '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE : int = jax.device_count() __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt] __SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE : Tuple = replicate(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = shard(_A ) __SCREAMING_SNAKE_CASE : Dict = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE : Optional[int] = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE : str = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE : Tuple = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = '''stabilityai/stable-diffusion-2''' __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = FlaxDPMSolverMultistepScheduler.from_pretrained(_A , subfolder='''scheduler''' ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = FlaxStableDiffusionPipeline.from_pretrained( _A , scheduler=_A , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE : List[str] = scheduler_params __SCREAMING_SNAKE_CASE : Tuple = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE : List[Any] = jax.device_count() __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt] __SCREAMING_SNAKE_CASE : Any = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = replicate(_A ) __SCREAMING_SNAKE_CASE : List[str] = shard(_A ) __SCREAMING_SNAKE_CASE : int = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE : Dict = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] ) print(F'''output_slice: {output_slice}''' ) 
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
74
1
from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING lowercase_ = logging.get_logger(__name__) @add_end_docstrings(lowerCAmelCase__ ) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : str , *_A : Dict , **_A : Tuple ): """simple docstring""" super().__init__(*_A , **_A ) requires_backends(self , '''vision''' ) self.check_model_type(_A ) def __call__( self : Dict , _A : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_A : int ): """simple docstring""" return super().__call__(_A , **_A ) def UpperCAmelCase__ ( self : Optional[Any] , **_A : List[Any] ): """simple docstring""" return {}, {}, {} def UpperCAmelCase__ ( self : Optional[int] , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = load_image(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = image.size __SCREAMING_SNAKE_CASE : Any = self.image_processor(images=_A , return_tensors=self.framework ) return model_inputs def UpperCAmelCase__ ( self : Any , _A : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.model(**_A ) return model_outputs def UpperCAmelCase__ ( self : Any , _A : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = model_outputs.predicted_depth __SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = prediction.squeeze().cpu().numpy() __SCREAMING_SNAKE_CASE : Any = (output * 255 / np.max(_A )).astype('''uint8''' ) __SCREAMING_SNAKE_CASE : str = Image.fromarray(_A ) __SCREAMING_SNAKE_CASE : str = {} __SCREAMING_SNAKE_CASE : Tuple = predicted_depth __SCREAMING_SNAKE_CASE : str = depth return output_dict
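# A hedged usage sketch for the depth-estimation pipeline implemented above, reached
# through the public `transformers.pipeline` factory; the checkpoint name and image
# URL are illustrative choices, not something this module prescribes.
if __name__ == "__main__":
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    print(outputs["predicted_depth"].shape)  # raw torch.Tensor with the model's depth prediction
    outputs["depth"].save("depth.png")       # PIL.Image rendering of the same prediction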
74
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) lowercase_ = { """configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""], """processing_layoutlmv2""": ["""LayoutLMv2Processor"""], """tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""LayoutLMv2TokenizerFast"""] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""LayoutLMv2FeatureExtractor"""] lowercase_ = ["""LayoutLMv2ImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""", """LayoutLMv2ForQuestionAnswering""", """LayoutLMv2ForSequenceClassification""", """LayoutLMv2ForTokenClassification""", """LayoutLMv2Layer""", """LayoutLMv2Model""", """LayoutLMv2PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
1
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable lowercase_ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""DPTFeatureExtractor"""] lowercase_ = ["""DPTImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """DPT_PRETRAINED_MODEL_ARCHIVE_LIST""", """DPTForDepthEstimation""", """DPTForSemanticSegmentation""", """DPTModel""", """DPTPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = MobileBertTokenizer lowerCAmelCase_ = MobileBertTokenizerFast lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = filter_non_english lowerCAmelCase_ = '''google/mobilebert-uncased''' def UpperCAmelCase__ ( self : Dict ): """simple docstring""" super().setUp() __SCREAMING_SNAKE_CASE : List[str] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __SCREAMING_SNAKE_CASE : int = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : List[str] = '''unwanted, running''' return input_text, output_text def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class(self.vocab_file ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(_A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] ) def UpperCAmelCase__ ( self : int ): """simple docstring""" if not self.test_rust_tokenizer: return __SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : str = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : str = tokenizer.encode(_A ) __SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) # With lower casing __SCREAMING_SNAKE_CASE : Any = self.get_tokenizer(do_lower_case=_A ) __SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer(do_lower_case=_A ) __SCREAMING_SNAKE_CASE : List[str] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) 
__SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
[UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] __SCREAMING_SNAKE_CASE : Dict = {} for i, token in enumerate(_A ): __SCREAMING_SNAKE_CASE : List[str] = i __SCREAMING_SNAKE_CASE : str = WordpieceTokenizer(vocab=_A , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def UpperCAmelCase__ ( self : str ): """simple docstring""" self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) self.assertListEqual( [rust_tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) @slow def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : str = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r.encode_plus( _A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , 
add_special_tokens=_A , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.do_lower_case if hasattr(_A , '''do_lower_case''' ) else False __SCREAMING_SNAKE_CASE : Optional[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''A'''), ((1, 2), ''','''), ((3, 5), '''na'''), ((5, 6), '''##ï'''), ((6, 8), '''##ve'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''Allen'''), ((21, 23), '''##NL'''), ((23, 24), '''##P'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''a'''), ((1, 2), ''','''), ((3, 8), '''naive'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''allen'''), ((21, 23), '''##nl'''), ((23, 24), '''##p'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = ['''的''', '''人''', '''有'''] __SCREAMING_SNAKE_CASE : int = ''''''.join(_A ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __SCREAMING_SNAKE_CASE : str = True __SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.convert_ids_to_tokens(_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_A , _A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Optional[Any] = False __SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A ) # it is expected that only the first Chinese character is not preceded by "##". __SCREAMING_SNAKE_CASE : List[Any] = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(_A ) ] self.assertListEqual(_A , _A ) self.assertListEqual(_A , _A )
74
1
from collections.abc import Iterable from typing import Any class __UpperCamelCase : """simple docstring""" def __init__( self : Optional[int] , _A : int | None = None ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = value __SCREAMING_SNAKE_CASE : Node | None = None # Added in order to delete a node easier __SCREAMING_SNAKE_CASE : Node | None = None __SCREAMING_SNAKE_CASE : Node | None = None def __repr__( self : Optional[Any] ): """simple docstring""" from pprint import pformat if self.left is None and self.right is None: return str(self.value ) return pformat({F'''{self.value}''': (self.left, self.right)} , indent=1 ) class __UpperCamelCase : """simple docstring""" def __init__( self : Optional[int] , _A : Node | None = None ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = root def __str__( self : List[str] ): """simple docstring""" return str(self.root ) def UpperCAmelCase__ ( self : Optional[int] , _A : Node , _A : Node | None ): """simple docstring""" if new_children is not None: # reset its kids __SCREAMING_SNAKE_CASE : List[Any] = node.parent if node.parent is not None: # reset its parent if self.is_right(_A ): # If it is the right children __SCREAMING_SNAKE_CASE : Tuple = new_children else: __SCREAMING_SNAKE_CASE : Tuple = new_children else: __SCREAMING_SNAKE_CASE : int = new_children def UpperCAmelCase__ ( self : Tuple , _A : Node ): """simple docstring""" if node.parent and node.parent.right: return node == node.parent.right return False def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self.root is None def UpperCAmelCase__ ( self : int , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = Node(_A ) # create a new Node if self.empty(): # if Tree is empty __SCREAMING_SNAKE_CASE : List[str] = new_node # set its root else: # Tree is not empty __SCREAMING_SNAKE_CASE : Optional[Any] = self.root # from root if parent_node is None: return while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: __SCREAMING_SNAKE_CASE : Optional[Any] = new_node # We insert the new node in a leaf break else: __SCREAMING_SNAKE_CASE : Dict = parent_node.left else: if parent_node.right is None: __SCREAMING_SNAKE_CASE : Tuple = new_node break else: __SCREAMING_SNAKE_CASE : Optional[Any] = parent_node.right __SCREAMING_SNAKE_CASE : Optional[Any] = parent_node def UpperCAmelCase__ ( self : str , *_A : int ): """simple docstring""" for value in values: self.__insert(_A ) def UpperCAmelCase__ ( self : str , _A : List[Any] ): """simple docstring""" if self.empty(): raise IndexError('''Warning: Tree is empty! 
please use another.''' ) else: __SCREAMING_SNAKE_CASE : Any = self.root # use lazy evaluation here to avoid NoneType Attribute error while node is not None and node.value is not value: __SCREAMING_SNAKE_CASE : Dict = node.left if value < node.value else node.right return node def UpperCAmelCase__ ( self : Optional[Any] , _A : Node | None = None ): """simple docstring""" if node is None: if self.root is None: return None __SCREAMING_SNAKE_CASE : Tuple = self.root if not self.empty(): while node.right is not None: __SCREAMING_SNAKE_CASE : List[Any] = node.right return node def UpperCAmelCase__ ( self : int , _A : Node | None = None ): """simple docstring""" if node is None: __SCREAMING_SNAKE_CASE : Any = self.root if self.root is None: return None if not self.empty(): __SCREAMING_SNAKE_CASE : Optional[Any] = self.root while node.left is not None: __SCREAMING_SNAKE_CASE : Optional[Any] = node.left return node def UpperCAmelCase__ ( self : Union[str, Any] , _A : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.search(_A ) # Look for the node with that label if node is not None: if node.left is None and node.right is None: # If it has no children self.__reassign_nodes(_A , _A ) elif node.left is None: # Has only right children self.__reassign_nodes(_A , node.right ) elif node.right is None: # Has only left children self.__reassign_nodes(_A , node.left ) else: __SCREAMING_SNAKE_CASE : Any = self.get_max( node.left ) # Gets the max value of the left branch self.remove(tmp_node.value ) # type: ignore __SCREAMING_SNAKE_CASE : Dict = ( tmp_node.value # type: ignore ) # Assigns the value to the node to delete and keep tree structure def UpperCAmelCase__ ( self : str , _A : Node | None ): """simple docstring""" if node is not None: yield node # Preorder Traversal yield from self.preorder_traverse(node.left ) yield from self.preorder_traverse(node.right ) def UpperCAmelCase__ ( self : int , _A : int=None ): """simple docstring""" if traversal_function is None: return self.preorder_traverse(self.root ) else: return traversal_function(self.root ) def UpperCAmelCase__ ( self : Optional[Any] , _A : list , _A : Node | None ): """simple docstring""" if node: self.inorder(_A , node.left ) arr.append(node.value ) self.inorder(_A , node.right ) def UpperCAmelCase__ ( self : Optional[Any] , _A : int , _A : Node ): """simple docstring""" __SCREAMING_SNAKE_CASE : list[int] = [] self.inorder(_A , _A ) # append all values to list using inorder traversal return arr[k - 1] def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = [] if curr_node is not None: __SCREAMING_SNAKE_CASE : Dict = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node] return node_list def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = (8, 3, 6, 1, 10, 14, 13, 4, 7) __SCREAMING_SNAKE_CASE : Any = BinarySearchTree() for i in testlist: t.insert(snake_case ) # Prints all the elements of the list in order traversal print(snake_case ) if t.search(6 ) is not None: print('''The value 6 exists''' ) else: print('''The value 6 doesn\'t exist''' ) if t.search(-1 ) is not None: print('''The value -1 exists''' ) else: print('''The value -1 doesn\'t exist''' ) if not t.empty(): print('''Max Value: ''' , t.get_max().value ) # type: ignore print('''Min Value: ''' , t.get_min().value ) # type: ignore for i in testlist: t.remove(snake_case ) print(snake_case ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
74
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        # Thin deprecation shim: behaves exactly like MobileViTImageProcessor,
        # but warns callers that the old class name is going away.
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
74
1
def rank_of_matrix(matrix: list[list[float]]) -> int:
    """Return the rank of `matrix` using Gaussian elimination (the matrix is
    modified in place)."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]

            # Reduce the row pointer by one to stay on the same row
            row -= 1

    return rank


if __name__ == "__main__":
    import doctest

    doctest.testmod()
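# Quick checks for rank_of_matrix as reconstructed above; note that the function
# eliminates in place, so pass a copy if the original matrix must be preserved.
assert rank_of_matrix([[1.0, 2.0], [2.0, 4.0]]) == 1   # second row is a multiple of the first
assert rank_of_matrix([[1.0, 0.0], [0.0, 1.0]]) == 2   # identity matrix has full rank
assert rank_of_matrix([[0.0, 0.0], [0.0, 0.0]]) == 0   # zero matrix has rank 0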
74
import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast lowercase_ = datasets.utils.logging.get_logger(__name__) @dataclass class __UpperCamelCase ( datasets.BuilderConfig ): """simple docstring""" lowerCAmelCase_ = 1_00_00 lowerCAmelCase_ = None lowerCAmelCase_ = None class __UpperCamelCase ( datasets.ArrowBasedBuilder ): """simple docstring""" lowerCAmelCase_ = ParquetConfig def UpperCAmelCase__ ( self : Any ): """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def UpperCAmelCase__ ( self : Any , _A : Optional[Any] ): """simple docstring""" if not self.config.data_files: raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) __SCREAMING_SNAKE_CASE : List[str] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_A , (str, list, tuple) ): __SCREAMING_SNAKE_CASE : Tuple = data_files if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Optional[int] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE : List[Any] = [dl_manager.iter_files(_A ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] __SCREAMING_SNAKE_CASE : int = [] for split_name, files in data_files.items(): if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Any = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE : Optional[int] = [dl_manager.iter_files(_A ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(_A ): with open(_A , '''rb''' ) as f: __SCREAMING_SNAKE_CASE : Dict = datasets.Features.from_arrow_schema(pq.read_schema(_A ) ) break splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'''files''': files} ) ) return splits def UpperCAmelCase__ ( self : str , _A : pa.Table ): """simple docstring""" if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example __SCREAMING_SNAKE_CASE : str = table_cast(_A , self.info.features.arrow_schema ) return pa_table def UpperCAmelCase__ ( self : Tuple , _A : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' ) for file_idx, file in enumerate(itertools.chain.from_iterable(_A ) ): with open(_A , '''rb''' ) as f: __SCREAMING_SNAKE_CASE : str = pq.ParquetFile(_A ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): __SCREAMING_SNAKE_CASE : Optional[Any] = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield F'''{file_idx}_{batch_idx}''', self._cast_table(_A ) except ValueError as 
e: logger.error(F'''Failed to read file \'{file}\' with error {type(_A )}: {e}''' ) raise
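# A hedged sketch of how this Parquet builder is normally reached from user code,
# via `datasets.load_dataset`; the file path below is an illustrative placeholder.
if __name__ == "__main__":
    from datasets import load_dataset

    dataset = load_dataset("parquet", data_files={"train": "path/to/train.parquet"})
    print(dataset["train"].features)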
74
1
import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput lowercase_ = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : str , *_A : Optional[int] , _A : Optional[int]=None , _A : Dict=None , _A : List[Any]=None , **_A : Optional[Any] ): """simple docstring""" super().__init__(*_A , **_A ) __SCREAMING_SNAKE_CASE : List[str] = eval_examples __SCREAMING_SNAKE_CASE : Optional[Any] = post_process_function __SCREAMING_SNAKE_CASE : Tuple = quant_trainer_args __SCREAMING_SNAKE_CASE : int = 128 # default number of calibration samples def UpperCAmelCase__ ( self : int , _A : Optional[int]=None ): """simple docstring""" if calib_dataset is None and self.calib_dataset is None: raise ValueError('''Trainer: calibration requires an calib_dataset.''' ) __SCREAMING_SNAKE_CASE : Tuple = calib_dataset if calib_dataset is not None else self.calib_dataset __SCREAMING_SNAKE_CASE : str = self._remove_unused_columns(_A , description='''Calibration''' ) return DataLoader( _A , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=_A , ) def UpperCAmelCase__ ( self : Optional[int] , _A : Tuple=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = self.train_dataset if calib_dataset is None else calib_dataset __SCREAMING_SNAKE_CASE : Tuple = self.get_calib_dataloader(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = self.model quant_trainer.configure_model(_A , self.quant_trainer_args , calib=_A ) model.eval() quant_trainer.enable_calibration(_A ) logger.info('''***** Running calibration *****''' ) logger.info(F''' Num examples = {self.calib_num}''' ) logger.info(F''' Batch size = {calib_dataloader.batch_size}''' ) for step, inputs in enumerate(_A ): # Prediction step __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = self.prediction_step(_A , _A , prediction_loss_only=_A ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(_A , self.quant_trainer_args ) __SCREAMING_SNAKE_CASE : Dict = model def UpperCAmelCase__ ( self : List[str] , _A : int=None , _A : Optional[Any]=None , _A : List[Any]=None , _A : str = "eval" ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = self.eval_dataset if eval_dataset is None else eval_dataset __SCREAMING_SNAKE_CASE : str = self.get_eval_dataloader(_A ) __SCREAMING_SNAKE_CASE : List[Any] = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. 
__SCREAMING_SNAKE_CASE : List[str] = self.compute_metrics __SCREAMING_SNAKE_CASE : Union[str, Any] = None __SCREAMING_SNAKE_CASE : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __SCREAMING_SNAKE_CASE : Optional[int] = eval_loop( _A , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , ) finally: __SCREAMING_SNAKE_CASE : List[str] = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: __SCREAMING_SNAKE_CASE : Dict = self.post_process_function(_A , _A , output.predictions ) __SCREAMING_SNAKE_CASE : str = self.compute_metrics(_A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): __SCREAMING_SNAKE_CASE : Tuple = metrics.pop(_A ) self.log(_A ) else: __SCREAMING_SNAKE_CASE : str = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) __SCREAMING_SNAKE_CASE : List[str] = self.callback_handler.on_evaluate(self.args , self.state , self.control , _A ) return metrics def UpperCAmelCase__ ( self : Tuple , _A : int , _A : Optional[int] , _A : str=None , _A : str = "test" ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.get_test_dataloader(_A ) # Temporarily disable metric computation, we will do it in the loop here. __SCREAMING_SNAKE_CASE : str = self.compute_metrics __SCREAMING_SNAKE_CASE : Dict = None __SCREAMING_SNAKE_CASE : Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __SCREAMING_SNAKE_CASE : Union[str, Any] = eval_loop( _A , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , ) finally: __SCREAMING_SNAKE_CASE : Tuple = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output __SCREAMING_SNAKE_CASE : Dict = self.post_process_function(_A , _A , output.predictions , '''predict''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self.compute_metrics(_A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): __SCREAMING_SNAKE_CASE : List[str] = metrics.pop(_A ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_A ) def UpperCAmelCase__ ( self : Dict , _A : List[Any]="./" ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.eval_dataset __SCREAMING_SNAKE_CASE : int = self.get_eval_dataloader(_A ) __SCREAMING_SNAKE_CASE : str = next(iter(_A ) ) # saving device - to make it consistent __SCREAMING_SNAKE_CASE : str = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) # convert to tuple __SCREAMING_SNAKE_CASE : List[Any] = tuple(v.to(_A ) for k, v in batch.items() ) logger.info('''Converting model to be onnx compatible''' ) from pytorch_quantization.nn import TensorQuantizer __SCREAMING_SNAKE_CASE : Dict = True __SCREAMING_SNAKE_CASE : Dict = self.model.to(_A ) model.eval() model.float() __SCREAMING_SNAKE_CASE : str = model.module if hasattr(_A , '''module''' ) else model quant_trainer.configure_model(_A , self.quant_trainer_args ) __SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(_A , '''model.onnx''' ) logger.info(F'''exporting model to {output_model_file}''' ) __SCREAMING_SNAKE_CASE : List[str] = {0: 
'''batch_size''', 1: '''seq_len'''} torch.onnx.export( _A , _A , _A , export_params=_A , opset_version=13 , do_constant_folding=_A , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={ '''input_ids''': axes, '''attention_mask''': axes, '''token_type_ids''': axes, '''output_start_logits''': axes, '''output_end_logits''': axes, } , verbose=_A , ) logger.info('''onnx export finished''' )
74
from math import isclose, sqrt def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = point_y / 4 / point_x __SCREAMING_SNAKE_CASE : int = 2 * normal_gradient / (1 + normal_gradient * normal_gradient) __SCREAMING_SNAKE_CASE : Tuple = (1 - normal_gradient * normal_gradient) / ( 1 + normal_gradient * normal_gradient ) __SCREAMING_SNAKE_CASE : int = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient) # to find the next point, solve the simultaeneous equations: # y^2 + 4x^2 = 100 # y - b = m * (x - a) # ==> A x^2 + B x + C = 0 __SCREAMING_SNAKE_CASE : int = outgoing_gradient**2 + 4 __SCREAMING_SNAKE_CASE : List[str] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x) __SCREAMING_SNAKE_CASE : Optional[Any] = (point_y - outgoing_gradient * point_x) ** 2 - 100 __SCREAMING_SNAKE_CASE : str = ( -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) __SCREAMING_SNAKE_CASE : int = ( -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) # two solutions, one of which is our input point __SCREAMING_SNAKE_CASE : Dict = x_minus if isclose(snake_case , snake_case ) else x_plus __SCREAMING_SNAKE_CASE : Dict = point_y + outgoing_gradient * (next_x - point_x) return next_x, next_y, outgoing_gradient def a__ ( snake_case = 1.4 , snake_case = -9.6 ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : float = first_x_coord __SCREAMING_SNAKE_CASE : float = first_y_coord __SCREAMING_SNAKE_CASE : float = (10.1 - point_y) / (0.0 - point_x) while not (-0.01 <= point_x <= 0.01 and point_y > 0): __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = next_point(snake_case , snake_case , snake_case ) num_reflections += 1 return num_reflections if __name__ == "__main__": print(f'''{solution() = }''')
74
1
def a__ ( snake_case , snake_case ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE : Tuple = len(snake_case ) + 1
    __SCREAMING_SNAKE_CASE : Tuple = len(snake_case ) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    __SCREAMING_SNAKE_CASE : Dict = [[0 for i in range(snake_case )] for j in range(snake_case )]

    # since a string of zero length matches a pattern of zero length
    __SCREAMING_SNAKE_CASE : Tuple = 1

    # since a pattern of zero length will never match a string of non-zero length
    for i in range(1 , snake_case ):
        __SCREAMING_SNAKE_CASE : Optional[int] = 0

    # since a string of zero length will match a pattern where there
    # is at least one * alternatively
    for j in range(1 , snake_case ):
        __SCREAMING_SNAKE_CASE : Optional[int] = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0

    # now using a bottom-up approach to find for all remaining lengths
    for i in range(1 , snake_case ):
        for j in range(1 , snake_case ):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                __SCREAMING_SNAKE_CASE : Tuple = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    __SCREAMING_SNAKE_CASE : List[Any] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    __SCREAMING_SNAKE_CASE : int = dp[i - 1][j]
                else:
                    __SCREAMING_SNAKE_CASE : Optional[Any] = 0
            else:
                __SCREAMING_SNAKE_CASE : int = 0

    return bool(dp[-1][-1] )


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # inputting the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    lowercase_ = """aab"""
    lowercase_ = """c*a*b"""

    # using the function to check whether the given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f'''{input_string} matches the given pattern {pattern}''')
    else:
        print(f'''{input_string} does not match with the given pattern {pattern}''')
74
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Any , _A : int , _A : Any=7 , _A : List[str]=3 , _A : Optional[Any]=18 , _A : List[str]=30 , _A : Optional[Any]=400 , _A : Any=True , _A : List[str]=None , _A : Union[str, Any]=True , _A : Optional[int]=None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = size if size is not None else {'''shortest_edge''': 20} __SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __SCREAMING_SNAKE_CASE : int = parent __SCREAMING_SNAKE_CASE : Optional[int] = batch_size __SCREAMING_SNAKE_CASE : Optional[Any] = num_channels __SCREAMING_SNAKE_CASE : List[str] = image_size __SCREAMING_SNAKE_CASE : int = min_resolution __SCREAMING_SNAKE_CASE : Optional[int] = max_resolution __SCREAMING_SNAKE_CASE : List[Any] = do_resize __SCREAMING_SNAKE_CASE : Union[str, Any] = size __SCREAMING_SNAKE_CASE : str = do_center_crop __SCREAMING_SNAKE_CASE : Any = crop_size def UpperCAmelCase__ ( self : Dict ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = MobileNetVaImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = MobileNetVaImageProcessingTester(self ) @property def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) self.assertTrue(hasattr(_A , '''do_center_crop''' ) ) self.assertTrue(hasattr(_A , '''crop_size''' ) ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def UpperCAmelCase__ ( self : int ): """simple docstring""" pass def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] 
, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : Dict = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
74
1
import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = OrderedDict() for key, value in state_dict.items(): if key.startswith('''module.encoder''' ): __SCREAMING_SNAKE_CASE : Optional[int] = key.replace('''module.encoder''' , '''glpn.encoder''' ) if key.startswith('''module.decoder''' ): __SCREAMING_SNAKE_CASE : List[Any] = key.replace('''module.decoder''' , '''decoder.stages''' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 __SCREAMING_SNAKE_CASE : Optional[Any] = key[key.find('''patch_embed''' ) + len('''patch_embed''' )] __SCREAMING_SNAKE_CASE : str = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(snake_case )-1}''' ) if "norm" in key: __SCREAMING_SNAKE_CASE : Tuple = key.replace('''norm''' , '''layer_norm''' ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 __SCREAMING_SNAKE_CASE : str = key[key.find('''glpn.encoder.layer_norm''' ) + len('''glpn.encoder.layer_norm''' )] __SCREAMING_SNAKE_CASE : Any = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(snake_case )-1}''' ) if "layer_norm1" in key: __SCREAMING_SNAKE_CASE : Dict = key.replace('''layer_norm1''' , '''layer_norm_1''' ) if "layer_norm2" in key: __SCREAMING_SNAKE_CASE : List[Any] = key.replace('''layer_norm2''' , '''layer_norm_2''' ) if "block" in key: # replace for example block1 by block.0 __SCREAMING_SNAKE_CASE : List[str] = key[key.find('''block''' ) + len('''block''' )] __SCREAMING_SNAKE_CASE : Dict = key.replace(F'''block{idx}''' , F'''block.{int(snake_case )-1}''' ) if "attn.q" in key: __SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace('''attn.q''' , '''attention.self.query''' ) if "attn.proj" in key: __SCREAMING_SNAKE_CASE : Dict = key.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in key: __SCREAMING_SNAKE_CASE : Tuple = key.replace('''attn''' , '''attention.self''' ) if "fc1" in key: __SCREAMING_SNAKE_CASE : Dict = key.replace('''fc1''' , '''dense1''' ) if "fc2" in key: __SCREAMING_SNAKE_CASE : Dict = key.replace('''fc2''' , '''dense2''' ) if "linear_pred" in key: __SCREAMING_SNAKE_CASE : int = key.replace('''linear_pred''' , '''classifier''' ) if "linear_fuse" in key: __SCREAMING_SNAKE_CASE : int = key.replace('''linear_fuse.conv''' , '''linear_fuse''' ) __SCREAMING_SNAKE_CASE : List[Any] = key.replace('''linear_fuse.bn''' , '''batch_norm''' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 __SCREAMING_SNAKE_CASE : List[str] = key[key.find('''linear_c''' ) + len('''linear_c''' )] __SCREAMING_SNAKE_CASE : Optional[Any] = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(snake_case )-1}''' ) if "bot_conv" in key: __SCREAMING_SNAKE_CASE : List[Any] = key.replace('''bot_conv''' , '''0.convolution''' ) if "skip_conv1" in key: __SCREAMING_SNAKE_CASE : int = key.replace('''skip_conv1''' , '''1.convolution''' ) if "skip_conv2" in key: __SCREAMING_SNAKE_CASE : Any = key.replace('''skip_conv2''' , '''2.convolution''' ) if "fusion1" in key: __SCREAMING_SNAKE_CASE : Any = key.replace('''fusion1''' , '''1.fusion''' ) if "fusion2" in key: __SCREAMING_SNAKE_CASE : Tuple = key.replace('''fusion2''' , '''2.fusion''' ) if "fusion3" in key: 
__SCREAMING_SNAKE_CASE : Optional[int] = key.replace('''fusion3''' , '''3.fusion''' ) if "fusion" in key and "conv" in key: __SCREAMING_SNAKE_CASE : Any = key.replace('''conv''' , '''convolutional_layer''' ) if key.startswith('''module.last_layer_depth''' ): __SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace('''module.last_layer_depth''' , '''head.head''' ) __SCREAMING_SNAKE_CASE : Tuple = value return new_state_dict def a__ ( snake_case , snake_case ): """simple docstring""" # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) __SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' ) __SCREAMING_SNAKE_CASE : Tuple = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict __SCREAMING_SNAKE_CASE : Dict = kv_weight[ : config.hidden_sizes[i], : ] __SCREAMING_SNAKE_CASE : str = kv_bias[: config.hidden_sizes[i]] __SCREAMING_SNAKE_CASE : List[str] = kv_weight[ config.hidden_sizes[i] :, : ] __SCREAMING_SNAKE_CASE : List[Any] = kv_bias[config.hidden_sizes[i] :] def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __SCREAMING_SNAKE_CASE : List[str] = Image.open(requests.get(snake_case , stream=snake_case ).raw ) return image @torch.no_grad() def a__ ( snake_case , snake_case , snake_case=False , snake_case=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) __SCREAMING_SNAKE_CASE : Union[str, Any] = GLPNImageProcessor() # prepare image __SCREAMING_SNAKE_CASE : str = prepare_img() __SCREAMING_SNAKE_CASE : str = image_processor(images=snake_case , return_tensors='''pt''' ).pixel_values logger.info('''Converting model...''' ) # load original state dict __SCREAMING_SNAKE_CASE : Optional[int] = torch.load(snake_case , map_location=torch.device('''cpu''' ) ) # rename keys __SCREAMING_SNAKE_CASE : str = rename_keys(snake_case ) # key and value matrices need special treatment read_in_k_v(snake_case , snake_case ) # create HuggingFace model and load state dict __SCREAMING_SNAKE_CASE : Dict = GLPNForDepthEstimation(snake_case ) model.load_state_dict(snake_case ) model.eval() # forward pass __SCREAMING_SNAKE_CASE : int = model(snake_case ) __SCREAMING_SNAKE_CASE : Optional[int] = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor( [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] ) elif "kitti" in model_name: __SCREAMING_SNAKE_CASE : int = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ) else: raise ValueError(F'''Unknown model name: {model_name}''' ) __SCREAMING_SNAKE_CASE : Any = torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , snake_case , atol=1E-4 ) print('''Looks ok!''' ) # finally, push to hub if required if push_to_hub: logger.info('''Pushing model and image processor to the hub...''' ) model.push_to_hub( repo_path_or_name=Path(snake_case , snake_case ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=snake_case , ) 
image_processor.push_to_hub( repo_path_or_name=Path(snake_case , snake_case ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=snake_case , ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub.""" ) parser.add_argument( """--model_name""", default="""glpn-kitti""", type=str, help="""Name of the model in case you're pushing to the hub.""", ) lowercase_ = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
74
def a__ ( snake_case ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE : List[str] = [0 for i in range(len(snake_case ) )]

    # initialize interval's left pointer and right pointer
    __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = 0, 0

    for i in range(1 , len(snake_case ) ):
        # case when current index is inside the interval
        if i <= right_pointer:
            __SCREAMING_SNAKE_CASE : List[Any] = min(right_pointer - i + 1 , z_result[i - left_pointer] )
            __SCREAMING_SNAKE_CASE : Dict = min_edge

        while go_next(snake_case , snake_case , snake_case ):
            z_result[i] += 1

        # if the new index's result gives us a larger right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = i, i + z_result[i] - 1

    return z_result


def a__ ( snake_case , snake_case , snake_case ):
    """simple docstring"""
    return i + z_result[i] < len(snake_case ) and s[z_result[i]] == s[i + z_result[i]]


def a__ ( snake_case , snake_case ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE : str = 0

    # concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
    __SCREAMING_SNAKE_CASE : str = z_function(pattern + input_str )

    for val in z_result:
        # if the value is greater than the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(snake_case ):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
74
1
import string import numpy def a__ ( snake_case , snake_case ): """simple docstring""" return b if a == 0 else greatest_common_divisor(b % a , snake_case ) class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) lowerCAmelCase_ = numpy.vectorize(lambda lowerCAmelCase__ : x % 36 ) lowerCAmelCase_ = numpy.vectorize(lowerCAmelCase__ ) def __init__( self : List[Any] , _A : numpy.ndarray ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.modulus(_A ) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key __SCREAMING_SNAKE_CASE : str = encrypt_key.shape[0] def UpperCAmelCase__ ( self : Optional[Any] , _A : str ): """simple docstring""" return self.key_string.index(_A ) def UpperCAmelCase__ ( self : Union[str, Any] , _A : int ): """simple docstring""" return self.key_string[round(_A )] def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: __SCREAMING_SNAKE_CASE : str = det % len(self.key_string ) __SCREAMING_SNAKE_CASE : Any = len(self.key_string ) if greatest_common_divisor(_A , len(self.key_string ) ) != 1: __SCREAMING_SNAKE_CASE : Tuple = ( F'''determinant modular {req_l} of encryption key({det}) ''' F'''is not co prime w.r.t {req_l}.\nTry another key.''' ) raise ValueError(_A ) def UpperCAmelCase__ ( self : Optional[Any] , _A : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = [char for char in text.upper() if char in self.key_string] __SCREAMING_SNAKE_CASE : str = chars[-1] while len(_A ) % self.break_key != 0: chars.append(_A ) return "".join(_A ) def UpperCAmelCase__ ( self : Any , _A : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = self.process_text(text.upper() ) __SCREAMING_SNAKE_CASE : Optional[Any] = '''''' for i in range(0 , len(_A ) - self.break_key + 1 , self.break_key ): __SCREAMING_SNAKE_CASE : str = text[i : i + self.break_key] __SCREAMING_SNAKE_CASE : Dict = [self.replace_letters(_A ) for char in batch] __SCREAMING_SNAKE_CASE : Union[str, Any] = numpy.array([vec] ).T __SCREAMING_SNAKE_CASE : str = self.modulus(self.encrypt_key.dot(_A ) ).T.tolist()[ 0 ] __SCREAMING_SNAKE_CASE : List[str] = ''''''.join( self.replace_digits(_A ) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: __SCREAMING_SNAKE_CASE : str = det % len(self.key_string ) __SCREAMING_SNAKE_CASE : List[Any] = None for i in range(len(self.key_string ) ): if (det * i) % len(self.key_string ) == 1: __SCREAMING_SNAKE_CASE : Optional[int] = i break __SCREAMING_SNAKE_CASE : Any = ( det_inv * numpy.linalg.det(self.encrypt_key ) * numpy.linalg.inv(self.encrypt_key ) ) return self.to_int(self.modulus(_A ) ) def UpperCAmelCase__ ( self : Any , _A : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.make_decrypt_key() __SCREAMING_SNAKE_CASE : str = self.process_text(text.upper() ) __SCREAMING_SNAKE_CASE : Optional[int] = '''''' for i in range(0 , len(_A ) - self.break_key + 1 , self.break_key ): __SCREAMING_SNAKE_CASE : Optional[int] = text[i : i + self.break_key] __SCREAMING_SNAKE_CASE : Tuple = [self.replace_letters(_A ) for char in batch] 
__SCREAMING_SNAKE_CASE : Any = numpy.array([vec] ).T __SCREAMING_SNAKE_CASE : Dict = self.modulus(decrypt_key.dot(_A ) ).T.tolist()[0] __SCREAMING_SNAKE_CASE : str = ''''''.join( self.replace_digits(_A ) for num in batch_decrypted ) decrypted += decrypted_batch return decrypted def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = int(input('''Enter the order of the encryption key: ''' ) ) __SCREAMING_SNAKE_CASE : Optional[Any] = [] print('''Enter each row of the encryption key with space separated integers''' ) for _ in range(snake_case ): __SCREAMING_SNAKE_CASE : Dict = [int(snake_case ) for x in input().split()] hill_matrix.append(snake_case ) __SCREAMING_SNAKE_CASE : Dict = HillCipher(numpy.array(snake_case ) ) print('''Would you like to encrypt or decrypt some text? (1 or 2)''' ) __SCREAMING_SNAKE_CASE : Dict = input('''\n1. Encrypt\n2. Decrypt\n''' ) if option == "1": __SCREAMING_SNAKE_CASE : Union[str, Any] = input('''What text would you like to encrypt?: ''' ) print('''Your encrypted text is:''' ) print(hc.encrypt(snake_case ) ) elif option == "2": __SCREAMING_SNAKE_CASE : int = input('''What text would you like to decrypt?: ''' ) print('''Your decrypted text is:''' ) print(hc.decrypt(snake_case ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
74
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


lowercase_ = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        """SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """SwinForImageClassification""",
        """SwinForMaskedImageModeling""",
        """SwinModel""",
        """SwinPreTrainedModel""",
        """SwinBackbone""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        """TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFSwinForImageClassification""",
        """TFSwinForMaskedImageModeling""",
        """TFSwinModel""",
        """TFSwinPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    import sys

    lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
1
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
74
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = XCLIPTextConfig() # derive patch size from model name __SCREAMING_SNAKE_CASE : Tuple = model_name.find('''patch''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPVisionConfig(patch_size=snake_case , num_frames=snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 __SCREAMING_SNAKE_CASE : Optional[Any] = 12 __SCREAMING_SNAKE_CASE : Optional[Any] = 1_024 __SCREAMING_SNAKE_CASE : int = 4_096 __SCREAMING_SNAKE_CASE : Tuple = 16 __SCREAMING_SNAKE_CASE : Optional[int] = 24 __SCREAMING_SNAKE_CASE : Optional[int] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 if model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Any = 336 __SCREAMING_SNAKE_CASE : Any = XCLIPConfig.from_text_vision_configs(snake_case , snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Any = 768 return config def a__ ( snake_case ): """simple docstring""" # text encoder if name == "token_embedding.weight": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' ) if name == "positional_embedding": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' ) if "ln_1" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''ln_1''' , '''layer_norm1''' ) if "ln_2" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''ln_2''' , '''layer_norm2''' ) if "c_fc" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''c_fc''' , '''fc1''' ) if "c_proj" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''c_proj''' , '''fc2''' ) if name.startswith('''transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : Any = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' ) if "attn.out_proj" in name and "message" not in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' ) if "ln_final" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''ln_final''' , '''text_model.final_layer_norm''' ) # visual encoder if name == "visual.class_embedding": __SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' ) if name == "visual.positional_embedding": __SCREAMING_SNAKE_CASE : Tuple = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' ) if name.startswith('''visual.transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : List[Any] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' ) if "visual.conv1" in name: __SCREAMING_SNAKE_CASE : Any = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' ) if "visual.ln_pre" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' ) if "visual.ln_post" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''visual.ln_post''' , 
'''vision_model.post_layernorm''' ) if "visual.proj" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''visual.proj''' , '''visual_projection.weight''' ) if "text_projection" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''text_projection''' , '''text_projection.weight''' ) # things on top if "prompts_visual_proj" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' ) if "prompts_visual_ln" in name: __SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' ) # mit if name == "mit.positional_embedding": __SCREAMING_SNAKE_CASE : Any = name.replace('''positional''' , '''position''' ) if name.startswith('''mit.resblocks''' ): __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' ) # prompts generator if name.startswith('''prompts_generator.norm''' ): __SCREAMING_SNAKE_CASE : Tuple = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' ) return name def a__ ( snake_case , snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): __SCREAMING_SNAKE_CASE : Tuple = orig_state_dict.pop(snake_case ) if "attn.in_proj" in key: __SCREAMING_SNAKE_CASE : Optional[Any] = key.split('''.''' ) if key.startswith('''visual''' ): __SCREAMING_SNAKE_CASE : List[Any] = key_split[3] __SCREAMING_SNAKE_CASE : Any = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __SCREAMING_SNAKE_CASE : Union[str, Any] = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Optional[Any] = val[ :dim ] __SCREAMING_SNAKE_CASE : Tuple = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim: ] else: if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : str = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Dict = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[-dim:] elif key.startswith('''mit''' ): __SCREAMING_SNAKE_CASE : List[str] = key_split[2] __SCREAMING_SNAKE_CASE : Union[str, Any] = config.vision_config.mit_hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : str = val[:dim, :] __SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :] __SCREAMING_SNAKE_CASE : Optional[int] = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Any = val[:dim] __SCREAMING_SNAKE_CASE : Any = val[dim : dim * 2] __SCREAMING_SNAKE_CASE : Optional[Any] = val[-dim:] else: __SCREAMING_SNAKE_CASE : Optional[Any] = key_split[2] __SCREAMING_SNAKE_CASE : Any = config.text_config.hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[:dim, :] __SCREAMING_SNAKE_CASE : int = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Dict = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Tuple = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : int = val[-dim:] else: __SCREAMING_SNAKE_CASE : int = rename_key(snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __SCREAMING_SNAKE_CASE : int = val.T __SCREAMING_SNAKE_CASE : Union[str, Any] = val return orig_state_dict def a__ ( snake_case ): """simple docstring""" if num_frames == 8: __SCREAMING_SNAKE_CASE : List[Any] = '''eating_spaghetti_8_frames.npy''' elif num_frames == 16: __SCREAMING_SNAKE_CASE : Tuple = '''eating_spaghetti.npy''' 
elif num_frames == 32: __SCREAMING_SNAKE_CASE : Dict = '''eating_spaghetti_32_frames.npy''' __SCREAMING_SNAKE_CASE : List[str] = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename=snake_case , repo_type='''dataset''' , ) __SCREAMING_SNAKE_CASE : int = np.load(snake_case ) return list(snake_case ) def a__ ( snake_case , snake_case=None , snake_case=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = { # fully supervised kinetics-400 checkpoints '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''', '''xclip-base-patch32-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth''' ), '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''', '''xclip-base-patch16-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth''' ), '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb''', '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f''', # fully supervised kinetics-600 checkpoints '''xclip-base-patch16-kinetics-600''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth''' ), '''xclip-base-patch16-kinetics-600-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth''' ), '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be''', # few shot '''xclip-base-patch16-hmdb-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth''' ), '''xclip-base-patch16-hmdb-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth''' ), '''xclip-base-patch16-hmdb-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth''' ), '''xclip-base-patch16-hmdb-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth''' ), '''xclip-base-patch16-ucf-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth''' ), '''xclip-base-patch16-ucf-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth''' ), '''xclip-base-patch16-ucf-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth''' ), '''xclip-base-patch16-ucf-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth''' ), # zero shot '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''', } __SCREAMING_SNAKE_CASE : Optional[Any] = model_to_url[model_name] __SCREAMING_SNAKE_CASE : Any = 8 if "16-frames" in model_name: __SCREAMING_SNAKE_CASE : Optional[int] = 16 elif "shot" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 32 __SCREAMING_SNAKE_CASE : List[str] = get_xclip_config(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPModel(snake_case ) model.eval() if "drive" in checkpoint_url: __SCREAMING_SNAKE_CASE : Union[str, Any] = '''pytorch_model.bin''' gdown.cached_download(snake_case , 
snake_case , quiet=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(snake_case , map_location='''cpu''' )['''model'''] else: __SCREAMING_SNAKE_CASE : str = torch.hub.load_state_dict_from_url(snake_case )['''model'''] __SCREAMING_SNAKE_CASE : List[Any] = convert_state_dict(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = XCLIPModel(snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = model.load_state_dict(snake_case , strict=snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() __SCREAMING_SNAKE_CASE : Any = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224 __SCREAMING_SNAKE_CASE : str = VideoMAEImageProcessor(size=snake_case ) __SCREAMING_SNAKE_CASE : int = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : Optional[int] = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : List[Any] = XCLIPProcessor(image_processor=snake_case , tokenizer=snake_case ) __SCREAMING_SNAKE_CASE : Dict = prepare_video(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = processor( text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=snake_case , return_tensors='''pt''' , padding=snake_case ) print('''Shape of pixel values:''' , inputs.pixel_values.shape ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Optional[Any] = model(**snake_case ) # Verify outputs __SCREAMING_SNAKE_CASE : Dict = outputs.logits_per_video __SCREAMING_SNAKE_CASE : Tuple = logits_per_video.softmax(dim=1 ) print('''Probs:''' , snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] ) elif model_name == "xclip-base-patch16": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] ) elif model_name == "xclip-large-patch14": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": __SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": __SCREAMING_SNAKE_CASE : str = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": __SCREAMING_SNAKE_CASE : int = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": 
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] ) else: raise ValueError(F'''Model name {model_name} not supported''' ) assert torch.allclose(snake_case , snake_case , atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case ) if push_to_hub: print('''Pushing model, processor and slow tokenizer files to the hub...''' ) model.push_to_hub(snake_case , organization='''nielsr''' ) processor.push_to_hub(snake_case , organization='''nielsr''' ) slow_tokenizer.push_to_hub(snake_case , organization='''nielsr''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowercase_ = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
74
1