| code (string, lengths 82-53.2k) | code_codestyle (int64, 0-721) | style_context (string, lengths 91-41.9k) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
"""Burrows-Wheeler transform (BWT) and its inverse."""
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """Return all cyclic rotations of the string s."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """Compute the Burrows-Wheeler transform of s."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations alphabetically
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Reverse the Burrows-Wheeler transform and return the original string."""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError("The parameter idx_original_string type must be int or castable to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
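# Hand-checked example (added here; not part of the original script):
#   bwt_transform("^BANANA") -> {'bwt_string': 'BNN^AAA', 'idx_original_string': 6}
#   reverse_bwt("BNN^AAA", 6) -> "^BANANA"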
| 51
|
"""Hamming distance between two equal-length strings."""


def hamming_distance(string1: str, string2: str) -> int:
    """Count the positions at which the two strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
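# Hand-checked examples (added): hamming_distance("python", "python") == 0 and
# hamming_distance("karolin", "kathrin") == 3.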
| 127
| 0
|
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def _SCREAMING_SNAKE_CASE ( _lowercase : Tuple , _lowercase : List[str] , _lowercase : Tuple=None , **_lowercase : Optional[Any] ) ->List[Any]:
'''simple docstring'''
a : List[str] = [x.strip() for x in open(_lowercase ).readlines()]
a : Any = [x.strip() for x in open(_lowercase ).readlines()][: len(_lowercase )]
a : Dict = calculate_rouge(_lowercase , _lowercase , **_lowercase )
if save_path is not None:
save_json(_lowercase , _lowercase , indent=_lowercase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
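# Usage sketch (file names are hypothetical; fire.Fire exposes the function's
# parameters as command-line arguments):
#   python rouge_cli.py predictions.txt targets.txt --save_path metrics.json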
| 31
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]


@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs


class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
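# Usage sketch (the public `datasets` API that this builder backs; extra keyword
# arguments such as `sep` flow into CsvConfig above; the file name is hypothetical):
#   from datasets import load_dataset
#   ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=";")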
| 31
| 1
|
"""Min-heap data structure with a decrease-key operation in O(log n) time."""


class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)
# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
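# Hand-traced expectation for the demo above (added; not captured program output):
# before the decrease the heap prints Node(R, -1), Node(X, 1), Node(A, 3),
# Node(B, 6), Node(E, 4); after decrease_key(b, -17) it prints Node(B, -17),
# Node(R, -1), Node(A, 3), Node(X, 1), Node(E, 4).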
| 150
|
"""Image encoder used by the PaintByExample pipeline."""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel

from ...models.attention import BasicTransformerBlock
from ...utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
| 150
| 1
|
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
| 721
|
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}


def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict


def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
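# Usage sketch (repo and file names are hypothetical placeholders; the flags are
# the ones defined by the argument parser above):
#   python convert_rwkv_checkpoint_to_hf.py --repo_id <hub-repo> \
#       --checkpoint_file <checkpoint>.pth --output_dir ./converted --size 169M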
| 310
| 0
|
"""
Project Euler Problem 47: https://projecteuler.net/problem=47

Find the first four consecutive integers to have four distinct prime factors each.
"""
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the unique prime factors of an integer."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of unique prime factors for a given value."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check that all elements of an iterable are equal."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first n consecutive integers that each have n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4):
    """Return the first member of the first group of n consecutive integers with n distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
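# Known results (from the Project Euler problem statement and published answer,
# not computed here): solution(3) == 644, since 644, 645 and 646 each have three
# distinct prime factors, and the default solution() == solution(4) == 134043.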
| 143
|
"""
Project Euler Problem 3: https://projecteuler.net/problem=3

The prime factors of 13195 are 5, 7, 13 and 29. What is the largest prime
factor of the number 600851475143?
"""


def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")

    max_prime = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_prime = i
            n //= i
        i += 1
    if n > 1:
        max_prime = n
    return int(max_prime)


if __name__ == "__main__":
    print(f"{solution() = }")
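# Hand-checked example: 13195 = 5 * 7 * 13 * 29, so solution(13195) == 29. The
# default call returns the largest prime factor of 600851475143 (the published
# Project Euler #3 answer is 6857).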
| 143
| 1
|
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """
    BigBirdForQuestionAnswering with a CLS head on top for predicting the answer category.
    This way we can still load its weights with FlaxBigBirdForQuestionAnswering.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule


def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        num_classes = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(num_classes)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3


@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3_000
    save_steps: int = 10_500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20_000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()


@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask


def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)


@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics


class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)


@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")


def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator


def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # Decay everything except biases and LayerNorm scales.
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
| 12
|
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )

    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")


if __name__ == "__main__":
    main()
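# Usage sketch (mirrors the transformers text-classification example; the model
# choice and output path are illustrative):
#   python run_xnli.py --model_name_or_path bert-base-multilingual-cased \
#       --language de --train_language en --do_train --do_eval \
#       --output_dir /tmp/debug_xnli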
| 12
| 1
|
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
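# Usage sketch (only the `env` subcommand is registered above):
#   diffusers-cli env
# prints environment information that is useful when filing bug reports.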
| 165
|
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script."
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )
logger.info('''Training/evaluation parameters %s''', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
    config.max_workspace_size = 1 << 50
    if STRICT_TYPES:
        config.set_flag(trt.BuilderFlag.STRICT_TYPES)
    if args.fp16:
        config.set_flag(trt.BuilderFlag.FP16)
    if args.int8:
        config.set_flag(trt.BuilderFlag.INT8)
    profile = builder.create_optimization_profile()
    config.add_optimization_profile(profile)
    for i in range(len(input_names)):
        profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
    engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
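# Note (inferred from the calls in this file, not stated explicitly): the engine is
# assumed to expose three input bindings (input_ids, attention_mask, token_type_ids)
# at indices 0-2 and two output bindings (start/end logits) at indices 3 and 4,
# which is why the host buffers further below are sized from get_binding_shape(3)/(4).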
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def binding_nbytes(binding):
    return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
# Allocate device memory for inputs and outputs.
d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffers
h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
d_output0 = cuda.mem_alloc(h_output0.nbytes)
d_output1 = cuda.mem_alloc(h_output1.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
stream = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(F" Num examples = {len(eval_dataset)}")
logger.info(F" Batch size = {args.per_device_eval_batch_size}")
total_time = 0.0
niter = 0
start_time = timeit.default_timer()

all_preds = None
for step, batch in enumerate(eval_dataloader):
    outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
    total_time += infer_time
    niter += 1

    start_logits, end_logits = outputs
    start_logits = torch.tensor(start_logits)
    end_logits = torch.tensor(end_logits)

    # necessary to pad predictions and labels for being gathered
    start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
    end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

    logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
    all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

if all_preds is not None:
    all_preds = nested_truncate(all_preds, len(eval_dataset))

evalTime = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
logger.info("Total Number of Inference = %d", niter)

prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"Evaluation metrics: {eval_metric}")
| 338
| 0
|
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide):
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
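# Typical usage, per the note at the top of this file (run from the repo root):
#   python utils/check_task_guides.py                      # check only, fail on stale lists
#   python utils/check_task_guides.py --fix_and_overwrite  # rewrite stale lists in place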
| 702
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
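# A minimal sketch of a concrete command built on this interface, mirroring the
# register/run pattern used by the CLI entry points elsewhere in this file set.
# The command name and behavior here are illustrative, not from the original file:
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        hello_parser = parser.add_parser("hello")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello")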
| 553
| 0
|
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class __snake_case( unittest.TestCase ):
'''simple docstring'''
@slow
def __snake_case ( self ) -> Any:
lowerCAmelCase = """bert-base-cased"""
lowerCAmelCase = AutoConfig.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
lowerCAmelCase = TFAutoModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
@slow
def __snake_case ( self ) -> int:
lowerCAmelCase = """bert-base-cased"""
lowerCAmelCase = AutoConfig.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
lowerCAmelCase = TFAutoModelForPreTraining.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
@slow
def __snake_case ( self ) -> Optional[Any]:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = AutoConfig.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
lowerCAmelCase = TFAutoModelForCausalLM.from_pretrained(_lowerCAmelCase )
lowerCAmelCase, lowerCAmelCase = TFAutoModelForCausalLM.from_pretrained(_lowerCAmelCase , output_loading_info=_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
@slow
def __snake_case ( self ) -> str:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = AutoConfig.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
lowerCAmelCase = TFAutoModelWithLMHead.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
@slow
def __snake_case ( self ) -> Union[str, Any]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = AutoConfig.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
lowerCAmelCase = TFAutoModelForMaskedLM.from_pretrained(_lowerCAmelCase )
lowerCAmelCase, lowerCAmelCase = TFAutoModelForMaskedLM.from_pretrained(_lowerCAmelCase , output_loading_info=_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
@slow
def __snake_case ( self ) -> Dict:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = AutoConfig.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase )
lowerCAmelCase, lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase , output_loading_info=_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
@slow
def __snake_case ( self ) -> Dict:
for model_name in ["bert-base-uncased"]:
lowerCAmelCase = AutoConfig.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
lowerCAmelCase = TFAutoModelForSequenceClassification.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
@slow
def __snake_case ( self ) -> List[Any]:
for model_name in ["bert-base-uncased"]:
lowerCAmelCase = AutoConfig.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
lowerCAmelCase = TFAutoModelForQuestionAnswering.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
@slow
@require_tensorflow_probability
def __snake_case ( self ) -> Union[str, Any]:
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
lowerCAmelCase = AutoConfig.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
lowerCAmelCase = TFAutoModelForTableQuestionAnswering.from_pretrained(_lowerCAmelCase )
lowerCAmelCase, lowerCAmelCase = TFAutoModelForTableQuestionAnswering.from_pretrained(
_lowerCAmelCase , output_loading_info=_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def __snake_case ( self ) -> List[Any]:
model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
self.assertIsInstance(model, TFBertForMaskedLM)
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=_lowerCAmelCase ) , 1_4410 )
def __snake_case ( self ) -> Optional[Any]:
model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
self.assertIsInstance(model, TFRobertaForMaskedLM)
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=_lowerCAmelCase ) , 1_4410 )
def __snake_case ( self ) -> Optional[int]:
lowerCAmelCase = TFAutoModel.from_pretrained("""sgugger/funnel-random-tiny""" )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
lowerCAmelCase = copy.deepcopy(model.config )
lowerCAmelCase = ["""FunnelBaseModel"""]
lowerCAmelCase = TFAutoModel.from_config(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_lowerCAmelCase )
lowerCAmelCase = TFAutoModel.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def __snake_case ( self ) -> str:
try:
AutoConfig.register("""new-model""" , _lowerCAmelCase )
lowerCAmelCase = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(_lowerCAmelCase ):
auto_class.register(_lowerCAmelCase , _lowerCAmelCase )
auto_class.register(_lowerCAmelCase , _lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowerCAmelCase ):
auto_class.register(_lowerCAmelCase , _lowerCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase = BertModelTester(self ).get_config()
lowerCAmelCase = NewModelConfig(**tiny_config.to_dict() )
lowerCAmelCase = auto_class.from_config(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_lowerCAmelCase )
lowerCAmelCase = auto_class.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def __snake_case ( self ) -> Dict:
with self.assertRaisesRegex(
_lowerCAmelCase , """bert-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase = TFAutoModel.from_pretrained("""bert-base""" )
def __snake_case ( self ) -> Any:
with self.assertRaisesRegex(
_lowerCAmelCase , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
def __snake_case ( self ) -> Optional[int]:
with self.assertRaisesRegex(
_lowerCAmelCase , """hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin""" , ):
lowerCAmelCase = TFAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def __snake_case ( self ) -> Union[str, Any]:
with self.assertRaisesRegex(_lowerCAmelCase , """Use `from_pt=True` to load this model""" ):
lowerCAmelCase = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
def __snake_case ( self ) -> List[str]:
lowerCAmelCase = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
lowerCAmelCase = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
lowerCAmelCase = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
with RequestCounter() as counter:
lowerCAmelCase = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 433
|
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument("--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.")
    parser.add_argument(
        "--no_tpu", action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name", type=str, default="local",
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
    )
    parser.add_argument(
        "--tpu_zone", type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument("--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument(
        "--bfloat16", action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset", type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument("--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)")
    parser.add_argument(
        "--eval_dataset", type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of epochs to train for.")
    parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.")
    parser.add_argument("--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.")
    parser.add_argument(
        "--max_length", type=int, default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument("--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.")
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )
    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
args = parse_args()
main(args)
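# Example launch (illustrative paths only; the TFRecords are assumed to come from
# the companion prepare_tfrecord_shards.py script mentioned in --max_length's help):
#   python run_mlm.py --train_dataset gs://my-bucket/train/ --eval_dataset gs://my-bucket/eval/ \
#       --tokenizer unigram-tokenizer-wikitext --output_dir ./mlm-checkpoints --bfloat16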
| 474
| 0
|
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
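# Note: the routine above applies len(data) random transpositions, which is not the
# classic Fisher-Yates algorithm and does not sample permutations uniformly.
# A standard in-place Fisher-Yates sketch, for comparison:
def fisher_yates_shuffle_classic(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # pick from the not-yet-fixed prefix, inclusive of i
        data[i], data[j] = data[j], data[i]
    return data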
if __name__ == "__main__":
integers = [0, 1, 2, 3, 4, 5, 6, 7]
strings = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 715
|
from manim import *
class UpperCamelCase__(Scene):  # manim scenes must subclass Scene; original class name not recoverable from this copy
    def construct(self):  # construct() is the entry point manim calls to build the animation
__a : Dict = Rectangle(height=0.5 , width=0.5 )
__a : Optional[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__a : List[str] = [mem.copy() for i in range(6 )]
__a : str = [mem.copy() for i in range(6 )]
__a : List[Any] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
__a : List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
__a : Tuple = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
__a : Union[str, Any] = Text('''CPU''' , font_size=2_4 )
__a : Tuple = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
__a : int = [mem.copy() for i in range(4 )]
__a : Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
__a : List[str] = Text('''GPU''' , font_size=2_4 )
__a : Union[str, Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.move_to([-1, -1, 0] )
self.add(snake_case_ )
__a : str = [mem.copy() for i in range(6 )]
__a : Optional[Any] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
__a : Optional[Any] = Text('''Model''' , font_size=2_4 )
__a : List[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.add(snake_case_ )
__a : Dict = []
for i, rect in enumerate(snake_case_ ):
rect.set_stroke(snake_case_ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
__a : Union[str, Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=snake_case_ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=snake_case_ , buff=0.0 )
self.add(snake_case_ )
cpu_targs.append(snake_case_ )
__a : List[str] = [mem.copy() for i in range(6 )]
__a : Union[str, Any] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
__a : Optional[int] = Text('''Loaded Checkpoint''' , font_size=2_4 )
__a : str = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , aligned_edge=snake_case_ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
__a : int = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__a : str = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(snake_case_ , snake_case_ )
__a : Dict = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=1_8 , )
blue_text.next_to(snake_case_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
__a : int = MarkupText(
f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ ) , Write(snake_case_ ) )
self.play(Write(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) )
__a : int = []
__a : int = []
for i, rect in enumerate(snake_case_ ):
__a : Tuple = fill.copy().set_fill(snake_case_ , opacity=0.7 )
target.move_to(snake_case_ )
first_animations.append(GrowFromCenter(snake_case_ , run_time=1 ) )
__a : Optional[Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
self.play(*snake_case_ )
self.play(*snake_case_ )
self.wait()
| 326
| 0
|
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file in src_dir to dest_dir."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
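# fire.Fire exposes minify's signature on the command line, so a run looks like
# (the paths here are placeholders):
#   python minify.py path/to/src path/to/dest 5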
| 25
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_swinv2'] = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinv2 import (
    SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
    Swinv2ForImageClassification,
    Swinv2ForMaskedImageModeling,
    Swinv2Model,
    Swinv2PreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
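# The _LazyModule indirection registers a stand-in module object, so the heavy
# torch-backed submodules listed above are only imported the first time one of
# their attributes (e.g. Swinv2Model) is actually accessed.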
| 25
| 1
|
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
    'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class EncodecConfig(PretrainedConfig):
    model_type = 'encodec'

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type='weight_norm',
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode='reflect',
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ['weight_norm', 'time_group_norm']:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )
        super().__init__(**kwargs)
    @property
    def chunk_length(self):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self):
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self):
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
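# Quick sanity check of the derived properties with the defaults above
# (values computed by hand, for illustration only):
#   cfg = EncodecConfig(chunk_length_s=1.0, overlap=0.01)
#   cfg.chunk_length    -> 24000                     (1.0 s at 24 kHz)
#   cfg.frame_rate      -> ceil(24000 / 320) = 75    (hop = 8 * 5 * 4 * 2 = 320)
#   cfg.num_quantizers  -> int(1000 * 24.0 // 750) = 32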
| 703
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = '<<<<<<< This should probably be modified because it mentions: '
HIGHLIGHT_MESSAGE_POST = '=======\n>>>>>>>\n'
TO_HIGHLIGHT = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
def convert_command_factory(args: Namespace):
    """Factory used to create the ConvertCommand from parsed CLI arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dataset_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dataset_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 127
| 0
|
UNIVERSAL_GAS_CONSTANT = 8.314_459_8
def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    '''simple docstring'''
    if temperature < 0:
        raise Exception('Temperature cannot be less than 0 K')
    if molar_mass <= 0:
        raise Exception('Molar mass cannot be less than or equal to 0 kg/mol')
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
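# Quick sanity check (assuming the molar mass is given in kg/mol):
# for N2 at 300 K, (3 * 8.3144598 * 300 / 0.028) ** 0.5 is roughly 517 m/s.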
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(F'Vrms of Nitrogen gas at 300 K is {vrms} m/s')
| 167
|
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    '''simple docstring'''
    offline_runners = []
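    # Query the GitHub Actions API for the repository's self-hosted runners
    # (the token is expected to have actions:read permission, per the CLI help below).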
    cmd = (
        f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
        ' https://api.github.com/repos/huggingface/transformers/actions/runners'
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode('utf-8')
    status = json.loads(o)
    runners = status['runners']
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)
    # save the result so we can report them on Slack
    with open('offline_runners.txt', 'w') as fp:
        fp.write(json.dumps(offline_runners))
    if len(offline_runners) > 0:
        failed = '\n'.join([x['name'] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        '''simple docstring'''
        return values.split(',')
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--target_runners''',
default=None,
type=list_str,
required=True,
help='''Comma-separated list of runners to check status.''',
)
parser.add_argument(
'''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.'''
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 167
| 1
|
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
("""pretraining""", """MODEL_FOR_PRETRAINING_MAPPING_NAMES""", """AutoModelForPreTraining"""),
("""feature-extraction""", """MODEL_MAPPING_NAMES""", """AutoModel"""),
("""audio-classification""", """MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForAudioClassification"""),
("""text-generation""", """MODEL_FOR_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForCausalLM"""),
("""automatic-speech-recognition""", """MODEL_FOR_CTC_MAPPING_NAMES""", """AutoModelForCTC"""),
("""image-classification""", """MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForImageClassification"""),
("""image-segmentation""", """MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES""", """AutoModelForImageSegmentation"""),
("""fill-mask""", """MODEL_FOR_MASKED_LM_MAPPING_NAMES""", """AutoModelForMaskedLM"""),
("""object-detection""", """MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES""", """AutoModelForObjectDetection"""),
(
"""zero-shot-object-detection""",
"""MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES""",
"""AutoModelForZeroShotObjectDetection""",
),
("""question-answering""", """MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForQuestionAnswering"""),
("""text2text-generation""", """MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForSeq2SeqLM"""),
("""text-classification""", """MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForSequenceClassification"""),
("""automatic-speech-recognition""", """MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES""", """AutoModelForSpeechSeq2Seq"""),
(
"""table-question-answering""",
"""MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForTableQuestionAnswering""",
),
("""token-classification""", """MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForTokenClassification"""),
("""multiple-choice""", """MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES""", """AutoModelForMultipleChoice"""),
(
"""next-sentence-prediction""",
"""MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES""",
"""AutoModelForNextSentencePrediction""",
),
(
"""audio-frame-classification""",
"""MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForAudioFrameClassification""",
),
("""audio-xvector""", """MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES""", """AutoModelForAudioXVector"""),
(
"""document-question-answering""",
"""MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForDocumentQuestionAnswering""",
),
(
"""visual-question-answering""",
"""MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForVisualQuestionAnswering""",
),
    ("""image-to-text""", """MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES""", """AutoModelForVision2Seq"""),
(
"""zero-shot-image-classification""",
"""MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForZeroShotImageClassification""",
),
("""depth-estimation""", """MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES""", """AutoModelForDepthEstimation"""),
("""video-classification""", """MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForVideoClassification"""),
("""mask-generation""", """MODEL_FOR_MASK_GENERATION_MAPPING_NAMES""", """AutoModelForMaskGeneration"""),
]
def camel_case_split(identifier):
    '''simple docstring'''
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
    return [m.group(0) for m in matches]
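# e.g. camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"]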
def get_frameworks_table() -> pd.DataFrame:
    '''simple docstring'''
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace('Config', ''): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''.join(camel_case_split(attr_name)[:-1])
    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()
    data = {'model_type': all_models}
    data['pytorch'] = [pt_models[t] for t in all_models]
    data['tensorflow'] = [tf_models[t] for t in all_models]
    data['flax'] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = 'AutoProcessor'
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = 'AutoTokenizer'
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = 'AutoFeatureExtractor'
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = 'AutoTokenizer'
    data['processor'] = [processors[t] for t in all_models]
    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    '''simple docstring'''
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, F'TF_{model_mapping}', F'FLAX_{model_mapping}']
        auto_classes = [auto_class, F'TF_{auto_class}', F'Flax_{auto_class}']
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
    return table
def update_metadata(token, commit_sha):
    '''simple docstring'''
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)
    resolved_tags_file = hf_hub_download(
        'huggingface/transformers-metadata', 'pipeline_tags.json', repo_type='dataset', token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            'model_class': model_classes,
            'pipeline_tag': [table[m][0] for m in model_classes],
            'auto_class': [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, 'frameworks.json'))
        tags_dataset.to_json(os.path.join(tmp_dir, 'pipeline_tags.json'))
        if commit_sha is not None:
            commit_message = (
                F'Update with commit {commit_sha}\n\nSee: '
                F'https://github.com/huggingface/transformers/commit/{commit_sha}'
            )
        else:
            commit_message = 'Update'
        upload_folder(
            repo_id='huggingface/transformers-metadata', folder_path=tmp_dir, repo_type='dataset', token=token, commit_message=commit_message, )
def check_pipeline_tags():
    '''simple docstring'''
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]['pt']
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)
    if len(missing) > 0:
        msg = ', '.join(missing)
        raise ValueError(
            'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
            F'`utils/update_metadata.py`: {msg}. Please add them!' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--token""", type=str, help="""The token to use to push to the transformers-metadata dataset.""")
parser.add_argument("""--commit_sha""", type=str, help="""The sha of the commit going with this update.""")
parser.add_argument("""--check-only""", action="""store_true""", help="""Activate to just check all pipelines are present.""")
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 717
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ['input_features', 'is_longer']
    def __init__(self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10, fft_window_size=1024, padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None, truncation: str = 'fusion', padding: str = 'repeatpad', **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs, )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale='htk', )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm='slaney', mel_scale='slaney', )
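        # Note: the 'htk'-scaled bank above feeds the 'fusion' truncation path, while the
        # 'slaney' bank feeds the rand_trunc / padding-only paths (see _get_input_mel below).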
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output['feature_extractor_type'] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None):
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, 'hann'), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel='dB', )
        return log_mel_spectrogram.T
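    # The 'fusion' input stacks four views of one clip: the full mel spectrogram shrunk to
    # chunk size plus random front / middle / back crops, computed by the method below.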
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode='bilinear', align_corners=False)
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding):
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(F'data_truncating {truncation} not implemented')
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode='constant', constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], truncation: str = None, padding: Optional[str] = None, max_length: Optional[int] = None, sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
                    F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
                    F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(F'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {'input_features': input_mel, 'is_longer': is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
| 400
| 0
|
def match_pattern(input_string: str, pattern: str) -> bool:
    '''simple docstring'''
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == '*' else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
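# e.g. match_pattern("aab", "c*a*b") is True: "c*" matches the empty string,
# "a*" matches "aa" and "b" matches "b".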
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'{input_string} matches the given pattern {pattern}')
else:
print(F'{input_string} does not match with the given pattern {pattern}')
| 266
|
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args() -> argparse.Namespace:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-m', '--pretrained_model_name_or_path', type=str, default=None, required=True, help='Path to pretrained model or model identifier from huggingface.co/models.', )
    parser.add_argument(
        '-c', '--caption', type=str, default='robotic cat with wings', help='Text used to generate images.', )
    parser.add_argument(
        '-n', '--images_num', type=int, default=4, help='How much images to generate.', )
    parser.add_argument(
        '-s', '--seed', type=int, default=42, help='Seed for random process.', )
    parser.add_argument(
        '-ci', '--cuda_id', type=int, default=0, help='cuda_id.', )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    '''simple docstring'''
    if not len(imgs) == rows * cols:
        raise ValueError('The specified number of rows and columns are not correct.')
    w, h = imgs[0].size
    grid = Image.new('RGB', size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def generate_images(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42, ):
    '''simple docstring'''
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt, ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
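# Example invocation (hypothetical script name and model path):
#   python text2images.py -m ./quantized-sd-model -c "robotic cat with wings" -n 4 -s 42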
| 266
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 644
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size_divisor: int = 32, resample=PILImageResampling.BILINEAR, do_rescale: bool = True, **kwargs) -> None:
        """simple docstring"""
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)
    def resize(self, image: np.ndarray, size_divisor: int, resample, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        """simple docstring"""
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image
    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        """simple docstring"""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)
    def preprocess(self, images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]], do_resize: Optional[bool] = None, size_divisor: Optional[int] = None, resample=None, do_rescale: Optional[bool] = None, return_tensors: Optional[Union[TensorType, str]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> BatchFeature:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''')
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError('''Invalid image(s)''')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 2_55) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 644
| 1
|
"""simple docstring"""
import base64
def base85_encode(string: str) -> bytes:
    '''simple docstring'''
    return base64.a85encode(string.encode('utf-8'))
def base85_decode(a_encoded: bytes) -> str:
    '''simple docstring'''
    return base64.a85decode(a_encoded).decode('utf-8')
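# Round-trip sanity check: base85_decode(base85_encode("hi")) == "hi"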
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7
|
from __future__ import annotations
def print_distance(distance: list[float], src):
    """simple docstring"""
    print(f'''Vertex\tShortest Distance from vertex {src}''')
    for i, d in enumerate(distance):
        print(f'''{i}\t\t{d}''')
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    """simple docstring"""
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
        if distance[u] != float('''inf''') and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """simple docstring"""
    distance = [float('''inf''')] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
            if distance[u] != float('''inf''') and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception('''Negative cycle found''')
    return distance
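# Bellman-Ford relaxes every edge vertex_count - 1 times, so it runs in O(V * E);
# the extra check_negative_cycle pass over the edges detects negative-weight cycles.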
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input("""Enter number of vertices: """).strip())
    E = int(input("""Enter number of edges: """).strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print("""Edge """, i + 1)
        src, dest, weight = (
            int(x)
            for x in input("""Enter source, destination, weight: """).strip().split(""" """)
        )
        graph[i] = {"""src""": src, """dst""": dest, """weight""": weight}
    source = int(input("""\nEnter shortest path source:""").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 0
| 0
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=[10, 20, 30, 40] , _SCREAMING_SNAKE_CASE=[1, 1, 2, 1] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="relu" , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=None , ):
'''simple docstring'''
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = image_size
lowerCAmelCase = num_channels
lowerCAmelCase = embeddings_size
lowerCAmelCase = hidden_sizes
lowerCAmelCase = depths
lowerCAmelCase = is_training
lowerCAmelCase = use_labels
lowerCAmelCase = hidden_act
lowerCAmelCase = num_labels
lowerCAmelCase = scope
lowerCAmelCase = len(__A )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = TFRegNetModel(config=__A )
lowerCAmelCase = model(__A , training=__A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFRegNetForImageClassification(__A )
lowerCAmelCase = model(__A , labels=__A , training=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase = config_and_inputs
lowerCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class _snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE : List[str] = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
SCREAMING_SNAKE_CASE : str = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : Dict = False
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = TFRegNetModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=__A , has_text_modality=__A )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(__A )
lowerCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __A )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase = model_class(__A )
lowerCAmelCase = model(**self._prepare_for_class(__A , __A ) , training=__A )
lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase = self.model_tester.num_stages
self.assertEqual(len(__A ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCAmelCase = layer_type
lowerCAmelCase = True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase = True
check_hidden_states_output(__A , __A , __A )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE={} ):
lowerCAmelCase = model(__A , return_dict=__A , **__A )
lowerCAmelCase = model(__A , return_dict=__A , **__A ).to_tuple()
def recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if isinstance(__A , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__A , __A ):
recursive_check(__A , __A )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(__A , __A ) ) , msg=(
'Tuple and dict output are not equal. Difference:'
F' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'
) , )
recursive_check(__A , __A )
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(__A )
lowerCAmelCase = self._prepare_for_class(__A , __A )
lowerCAmelCase = self._prepare_for_class(__A , __A )
check_equivalence(__A , __A , __A )
lowerCAmelCase = self._prepare_for_class(__A , __A , return_labels=__A )
lowerCAmelCase = self._prepare_for_class(__A , __A , return_labels=__A )
check_equivalence(__A , __A , __A )
lowerCAmelCase = self._prepare_for_class(__A , __A )
lowerCAmelCase = self._prepare_for_class(__A , __A )
check_equivalence(__A , __A , __A , {'output_hidden_states': True} )
lowerCAmelCase = self._prepare_for_class(__A , __A , return_labels=__A )
lowerCAmelCase = self._prepare_for_class(__A , __A , return_labels=__A )
check_equivalence(__A , __A , __A , {'output_hidden_states': True} )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = TFRegNetModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def snake_case ( ) -> int:
"""simple docstring"""
lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=__A , return_tensors='tf' )
# forward pass
lowerCAmelCase = model(**__A , training=__A )
# verify the logits
lowerCAmelCase = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , __A )
lowerCAmelCase = tf.constant([-0.4_180, -1.5_051, -3.4_836] )
tf.debugging.assert_near(outputs.logits[0, :3] , __A , atol=1e-4 )
| 703
|
'''simple docstring'''
def climb_stairs(number_of_steps: int) -> int:
    """simple docstring"""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f'number_of_steps needs to be positive integer, your input {number_of_steps}'
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
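# e.g. climb_stairs(3) == 3: the distinct ways are 1+1+1, 1+2 and 2+1.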
if __name__ == "__main__":
import doctest
doctest.testmod()
| 514
| 0
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()
    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"
    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("""--model_name""", type=str, required=True, help="""model name e.g. bert-base-uncased""")
    parser.add_argument(
        """--cache_dir""", type=str, default=None, required=False, help="""Directory containing pytorch model""")
    parser.add_argument("""--pytorch_model_path""", type=str, required=True, help="""/path/to/<pytorch-model-name>.bin""")
    parser.add_argument("""--tf_cache_dir""", type=str, required=True, help="""Directory in which to save tensorflow model""")
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
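# Example invocation (hypothetical paths):
#   python convert_bert_checkpoint.py --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin --tf_cache_dir ./tf_ckpt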
| 518
|
def solution(n: int = 1_0_0_0) -> int:
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
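# Project Euler 57: among the first n expansions of the continued fraction for sqrt(2),
# count those whose numerator has more digits than the denominator
# (the 8th expansion, 1393/985, is the first such fraction).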
if __name__ == "__main__":
print(F'{solution() = }')
| 518
| 1
|
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = '<<<<<<< This should probably be modified because it mentions: '
HIGHLIGHT_MESSAGE_POST = '=======\n>>>>>>>\n'
TO_HIGHLIGHT = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R'tfds\.core', R'datasets'),
(R'tf\.io\.gfile\.GFile', R'open'),
(R'tf\.([\w\d]+)', R'datasets.Value(\'\1\')'),
(R'tfds\.features\.Text\(\)', R'datasets.Value(\'string\')'),
(R'tfds\.features\.Text\(', R'datasets.Value(\'string\'),'),
(R'features\s*=\s*tfds.features.FeaturesDict\(', R'features=datasets.Features('),
(R'tfds\.features\.FeaturesDict\(', R'dict('),
(R'The TensorFlow Datasets Authors', R'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(R'tfds\.', R'datasets.'),
(R'dl_manager\.manual_dir', R'self.config.data_dir'),
(R'self\.builder_config', R'self.config'),
]
def convert_command_factory(args: Namespace):
    """simple docstring"""
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
'''simple docstring'''
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        '''simple docstring'''
        train_parser = parser.add_parser(
            '''convert''', help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''', )
        train_parser.add_argument(
            '''--tfds_path''', type=str, required=True, help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''', )
        train_parser.add_argument(
            '''--datasets_directory''', type=str, required=True, help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        '''simple docstring'''
        self._logger = get_logger('''datasets-cli/converting''')
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
__A = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__A = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
__A = os.path.abspath(self._datasets_directory )
self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
__A = []
__A = []
__A = {}
if os.path.isdir(self._tfds_path ):
__A = os.listdir(lowerCAmelCase_ )
else:
__A = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'Looking at file {f_name}' )
__A = os.path.join(lowerCAmelCase_, lowerCAmelCase_ )
__A = os.path.join(lowerCAmelCase_, lowerCAmelCase_ )
if not os.path.isfile(lowerCAmelCase_ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(lowerCAmelCase_, encoding='''utf-8''' ) as f:
__A = f.readlines()
__A = []
__A = False
__A = False
__A = []
for line in lines:
__A = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__A = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
__A = ''''''
continue
elif "from absl import logging" in out_line:
__A = '''from datasets import logging\n'''
elif "getLogger" in out_line:
__A = out_line.replace('''getLogger''', '''get_logger''' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight) + '\n')
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
else:
for pattern, replacement in TO_CONVERT:
__A = re.sub(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__A = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''', lowerCAmelCase_ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
__A = '''from . import ''' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'Error converting {out_line.strip()}' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__A = True
out_lines.append(lowerCAmelCase_ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__A = f_name.replace('''.py''', '''''' )
__A = os.path.join(lowerCAmelCase_, lowerCAmelCase_ )
__A = os.path.join(lowerCAmelCase_, lowerCAmelCase_ )
os.makedirs(lowerCAmelCase_, exist_ok=lowerCAmelCase_ )
self._logger.info(f'Adding directory {output_dir}' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(lowerCAmelCase_ )
if needs_manual_update:
with_manual_update.append(lowerCAmelCase_ )
with open(lowerCAmelCase_, '''w''', encoding='''utf-8''' ) as f:
f.writelines(lowerCAmelCase_ )
self._logger.info(f'Converted in {output_file}' )
for utils_file in utils_files:
try:
__A = os.path.basename(lowerCAmelCase_ )
__A = imports_to_builder_map[f_name.replace('''.py''', '''''' )]
self._logger.info(f'Moving {dest_folder} to {utils_file}' )
shutil.copy(lowerCAmelCase_, lowerCAmelCase_ )
except KeyError:
self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
| 717
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
    '''simple docstring'''
    _backends = ["torch", "scipy"]
def __init__( self : Union[str, Any], *_lowerCamelCase : Optional[Any], **_lowerCamelCase : Tuple ):
'''simple docstring'''
requires_backends(self, ['''torch''', '''scipy'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Tuple, *_lowerCamelCase : List[str], **_lowerCamelCase : Dict ):
'''simple docstring'''
requires_backends(cls, ['''torch''', '''scipy'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int], *_lowerCamelCase : Optional[int], **_lowerCamelCase : List[str] ):
'''simple docstring'''
requires_backends(cls, ['''torch''', '''scipy'''] )
| 215
| 0
|
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = ["""model.decoder.embed_positions.weights"""]
def rename_keys(name):
    if "emb" in name:
        name = name.replace('''emb''', '''model.decoder.embed_tokens''')
    if "transformer" in name:
        name = name.replace('''transformer''', '''model.decoder''')
    if "cross_attention" in name:
        name = name.replace('''cross_attention''', '''encoder_attn''')
    if "linear1" in name:
        name = name.replace('''linear1''', '''fc1''')
    if "linear2" in name:
        name = name.replace('''linear2''', '''fc2''')
    if "norm1" in name:
        name = name.replace('''norm1''', '''self_attn_layer_norm''')
    if "norm_cross" in name:
        name = name.replace('''norm_cross''', '''encoder_attn_layer_norm''')
    if "norm2" in name:
        name = name.replace('''norm2''', '''final_layer_norm''')
    if "out_norm" in name:
        name = name.replace('''out_norm''', '''model.decoder.layer_norm''')
    if "linears" in name:
        name = name.replace('''linears''', '''lm_heads''')
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace('''condition_provider.conditioners.description.output_proj''', '''enc_to_dec_proj''')
    return name
def rename_state_dict(state_dict, hidden_size) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
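# Illustrative check of the fused-QKV split performed above (a minimal sketch,
# not part of the original conversion script; the helper name is made up):
def _demo_qkv_split(hidden_size=2):
    # a fused in_proj_weight stacks q, k and v along dim 0, so it is (3*h, h)
    val = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    q = val[:hidden_size, :]
    k = val[hidden_size : 2 * hidden_size, :]
    v = val[-hidden_size:, :]
    assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)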
def decoder_config_from_checkpoint(checkpoint) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )
    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, device=args.device)
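# Example invocation (the script and output folder names below are hypothetical):
#   python convert_musicgen_checkpoint.py --checkpoint small --pytorch_dump_folder ./musicgen-small --device cpu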
| 678
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
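# Minimal round-trip sketch mirroring the fixtures above (a hand-written
# example rather than part of the original test suite; `tmp_path` is assumed
# to be a pathlib.Path to a writable directory):
def _example_parquet_round_trip(tmp_path):
    ds = Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4], "col_3": [1.0, 2.0, 3.0, 4.0]})
    assert ParquetDatasetWriter(ds, tmp_path / "example.parquet").write() > 0
    reloaded = ParquetDatasetReader(str(tmp_path / "example.parquet")).read()
    assert reloaded.column_names == ["col_1", "col_2", "col_3"]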
| 678
| 1
|
'''simple docstring'''
from __future__ import annotations
def all_unique(input_list: list[int]) -> bool:
    """
    Return True if no value appears more than once in ``input_list``.

    >>> all_unique([1, 2, 3])
    True
    >>> all_unique([1, 2, 2])
    False
    """
    return len(set(input_list)) == len(input_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 411
|
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    '''simple docstring'''
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    '''simple docstring'''
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Training")
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa, GPT-2).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa, GPT-2)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")

    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )

    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")

    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only on the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )

    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"Loading data from {args.data_file}" )
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F"Loading student config from {args.student_config}" )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)
if args.n_gpu > 0:
student.to(F"cuda:{args.local_rank}" )
logger.info('''Student loaded.''' )
# TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
if args.n_gpu > 0:
teacher.to(F"cuda:{args.local_rank}" )
logger.info(F"Teacher loaded from {args.teacher_name}." )
# FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
| 411
| 1
|
from functools import lru_cache
def unique_prime_factors(n):
    """Return the set of distinct prime factors of n."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num):
    """Memoized count of distinct prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable):
    """Return True if all elements of the list are equal (or the list is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n):
    """Find the first run of n consecutive integers, each with n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n=4):
    """Return the first of n consecutive integers having n distinct prime factors each."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
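# Worked example: solution(4) == 134043, since 134043, 134044, 134045 and
# 134046 are the first four consecutive integers with exactly four distinct
# prime factors each (this is Project Euler problem 47).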
| 622
|
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 622
| 1
|
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 705
|
'''simple docstring'''
def bfs(graph, source, sink, parent):
    # Return True if there is an augmenting path from source to sink.
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS and used to store the augmenting path
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
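# For the sample capacity matrix above (the classic CLRS flow network), the
# maximum flow from node 0 to node 5 computed by ford_fulkerson is 23.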
| 528
| 0
|
"""simple docstring"""
import argparse
import hashlib
import io
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
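# Quick sanity sketch for the helper above (illustrative, not part of the
# conversion script): the returned linear layer reuses the embedding's storage,
# i.e. the LM head ends up weight-tied to the token embedding.
def _demo_tied_head():
    emb = nn.Embedding(10, 4)
    head = make_linear_from_emb(emb)
    assert head.weight.data.data_ptr() == emb.weight.data.data_ptr()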
def _download(url, root="."):
    # NOTE: `root` defaults to the current directory here because the call site
    # below only passes the URL.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )
    return model_bytes
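# Note on the checksum scheme above: the expected SHA-256 digest is embedded as
# the second-to-last path segment of each download URL in _MODELS, e.g.
# ".../<sha256>/tiny.pt", which is why `url.split("/")[-2]` recovers it and
# lets a cached file be verified without re-downloading.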
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # _download returns raw bytes, so wrap them for torch.load
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 607
|
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
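# For the first call above, the state-space tree enumerates all 4! = 24
# orderings of [3, 1, 2, 4]; each completed branch (index == len(sequence))
# prints one permutation, starting with [3, 1, 2, 4] itself.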
| 121
| 0
|
"""simple docstring"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
UpperCAmelCase__ ="\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
UpperCAmelCase__ ="\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
UpperCAmelCase__ ="\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels):
    """simple docstring"""
    return float((preds == labels).mean())


def acc_and_fa(preds, labels):
    """simple docstring"""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    """simple docstring"""
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
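# Tiny worked example for the helpers above (mirrors the module docstring):
# with preds = labels = numpy.array([0, 1]), simple_accuracy returns 1.0 and
# acc_and_fa returns {"accuracy": 1.0, "f1": 1.0}.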
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
    def _info(self):
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
    def _compute(self, predictions, references):
        '''simple docstring'''
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
| 705
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
UpperCAmelCase__ =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
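# The `_LazyModule` above defers the heavyweight torch/TF imports until an
# attribute is actually accessed, so importing this package stays cheap.
# Illustrative usage (module path assumed from the file layout):
#   from transformers.models.data2vec import Data2VecAudioConfig  # triggers the real import lazily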
| 442
| 0
|
"""simple docstring"""
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    '''simple docstring'''
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    '''simple docstring'''
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    '''simple docstring'''
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
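# Round-trip sketch: encrypt("SOS") -> "... --- ..." and
# decrypt("... --- ...") -> "SOS", so decrypt(encrypt(m)) == m.upper() for any
# message whose characters appear in MORSE_CODE_DICT.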
| 58
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        '''Total downsampling ratio of the feature extractor (product of the conv strides).'''
        return functools.reduce(operator.mul, self.conv_stride, 1)
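# Illustrative check (not in the original file): with the default conv_stride
# of (5, 2, 2, 2, 2, 2, 2), the property above evaluates to
# functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320, i.e. one
# output frame per 320 input samples (20 ms at a 16 kHz sampling rate).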
| 160
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
    required_optional_params = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size(self):
"""simple docstring"""
return 32
@property
    def time_input_dim(self):
"""simple docstring"""
return 32
@property
    def block_out_channels_0(self):
"""simple docstring"""
return self.time_input_dim
@property
    def time_embed_dim(self):
"""simple docstring"""
return self.time_input_dim * 4
@property
    def cross_attention_dim(self):
"""simple docstring"""
return 100
@property
    def dummy_tokenizer(self):
        """simple docstring"""
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
@property
    def dummy_text_encoder(self):
        """simple docstring"""
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
@property
def _snake_case ( self :List[str] ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = {
"""in_channels""": 9,
            # Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
SCREAMING_SNAKE_CASE__ = UNetaDConditionModel(**__A )
return model
@property
def _snake_case ( self :Any ) -> Optional[int]:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _snake_case ( self :str ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = VQModel(**self.dummy_movq_kwargs )
return model
def _snake_case ( self :Any ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ = self.dummy_tokenizer
SCREAMING_SNAKE_CASE__ = self.dummy_unet
SCREAMING_SNAKE_CASE__ = self.dummy_movq
SCREAMING_SNAKE_CASE__ = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__A , set_alpha_to_one=__A , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__A , )
SCREAMING_SNAKE_CASE__ = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _snake_case ( self :Dict , __A :Optional[int] , __A :Union[str, Any]=0 ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__A ) ).to(__A )
SCREAMING_SNAKE_CASE__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__A )
# create init_image
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(__A ) ).to(__A )
SCREAMING_SNAKE_CASE__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
SCREAMING_SNAKE_CASE__ = np.ones((64, 64) , dtype=np.floataa )
SCREAMING_SNAKE_CASE__ = 0
if str(__A ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(__A )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__A ).manual_seed(__A )
SCREAMING_SNAKE_CASE__ = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def _snake_case ( self :int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """cpu"""
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**__A )
SCREAMING_SNAKE_CASE__ = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
SCREAMING_SNAKE_CASE__ = pipe(**self.get_dummy_inputs(__A ) )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = pipe(
**self.get_dummy_inputs(__A ) , return_dict=__A , )[0]
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = image_from_tuple[0, -3:, -3:, -1]
print(f'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def _snake_case ( self :Dict ) -> str:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :Any ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self :Any ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
SCREAMING_SNAKE_CASE__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
SCREAMING_SNAKE_CASE__ = np.ones((768, 768) , dtype=np.floataa )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = """a hat"""
SCREAMING_SNAKE_CASE__ = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__A )
SCREAMING_SNAKE_CASE__ = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ = pipeline.to(__A )
pipeline.set_progress_bar_config(disable=__A )
SCREAMING_SNAKE_CASE__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = pipe_prior(
__A , generator=__A , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
SCREAMING_SNAKE_CASE__ = pipeline(
__A , image=__A , mask_image=__A , image_embeds=__A , negative_image_embeds=__A , generator=__A , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__A , __A )
| 59
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
_lowerCamelCase = '\n Text data.\n Second line of data.'
_lowerCamelCase = 'file'
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any ):
SCREAMING_SNAKE_CASE__ = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
SCREAMING_SNAKE_CASE__ = bytes(UpperCamelCase__ , """utf-8""" )
with zstd.open(UpperCamelCase__ , """wb""" ) as f:
f.write(UpperCamelCase__ )
return path
@pytest.fixture
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
with open(os.path.join(tmpfs.local_root_dir , UpperCamelCase__ ) , """w""" ) as f:
f.write(UpperCamelCase__ )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Dict , UpperCamelCase__: int , UpperCamelCase__: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any] ):
SCREAMING_SNAKE_CASE__ = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
SCREAMING_SNAKE_CASE__ = input_paths[compression_format]
SCREAMING_SNAKE_CASE__ = tmp_path / """cache"""
SCREAMING_SNAKE_CASE__ = DownloadConfig(cache_dir=UpperCamelCase__ , extract_compressed_file=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = cached_path(UpperCamelCase__ , download_config=UpperCamelCase__ )
with open(UpperCamelCase__ ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
with open(UpperCamelCase__ ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = """custom_cache"""
SCREAMING_SNAKE_CASE__ = """custom_extracted_dir"""
SCREAMING_SNAKE_CASE__ = tmp_path / """custom_extracted_path"""
if default_extracted:
SCREAMING_SNAKE_CASE__ = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , UpperCamelCase__ )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE__ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
SCREAMING_SNAKE_CASE__ = xz_file
SCREAMING_SNAKE_CASE__ = (
DownloadConfig(extract_compressed_file=UpperCamelCase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=UpperCamelCase__ )
)
SCREAMING_SNAKE_CASE__ = cached_path(UpperCamelCase__ , download_config=UpperCamelCase__ )
assert Path(UpperCamelCase__ ).parent.parts[-2:] == expected
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[int] ):
# absolute path
SCREAMING_SNAKE_CASE__ = str(Path(UpperCamelCase__ ).resolve() )
assert cached_path(UpperCamelCase__ ) == text_file
# relative path
SCREAMING_SNAKE_CASE__ = str(Path(UpperCamelCase__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(UpperCamelCase__ ) == text_file
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
# absolute path
SCREAMING_SNAKE_CASE__ = str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(UpperCamelCase__ ):
cached_path(UpperCamelCase__ )
# relative path
SCREAMING_SNAKE_CASE__ = """./__missing_file__.txt"""
with pytest.raises(UpperCamelCase__ ):
cached_path(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
SCREAMING_SNAKE_CASE__ = get_from_cache(f'''tmp://{tmpfs_file}''' )
with open(UpperCamelCase__ ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( ):
with pytest.raises(UpperCamelCase__ ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] ):
SCREAMING_SNAKE_CASE__ = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(UpperCamelCase__ ):
http_get("""https://huggingface.co""" , temp_file=UpperCamelCase__ )
with pytest.raises(UpperCamelCase__ ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] ):
SCREAMING_SNAKE_CASE__ = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(UpperCamelCase__ ):
ftp_get("""ftp://huggingface.co""" , temp_file=UpperCamelCase__ )
with pytest.raises(UpperCamelCase__ ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int ):
SCREAMING_SNAKE_CASE__ = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(UpperCamelCase__ ):
fsspec_get("""s3://huggingface.co""" , temp_file=UpperCamelCase__ )
with pytest.raises(UpperCamelCase__ ):
fsspec_head("""s3://huggingface.co""" )
| 59
| 1
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Optional[int] = ["""image_processor""", """tokenizer"""]
A_ : Optional[Any] = """ChineseCLIPImageProcessor"""
A_ : Any = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : str , _A : List[str]=None , _A : Dict=None , **_A : Dict ) -> Optional[int]:
__magic_name__ : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _A , )
__magic_name__ : int = kwargs.pop('feature_extractor' )
__magic_name__ : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_A , _A )
__magic_name__ : str = self.image_processor
def __call__( self : str , _A : Union[str, Any]=None , _A : int=None , _A : Optional[int]=None , **_A : Optional[int] ) -> Any:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
__magic_name__ : int = self.tokenizer(_A , return_tensors=_A , **_A )
if images is not None:
__magic_name__ : int = self.image_processor(_A , return_tensors=_A , **_A )
if text is not None and images is not None:
__magic_name__ : List[str] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_A ) , tensor_type=_A )
def __lowerCAmelCase ( self : List[str] , *_A : str , **_A : Optional[Any] ) -> Optional[Any]:
return self.tokenizer.batch_decode(*_A , **_A )
def __lowerCAmelCase ( self : int , *_A : str , **_A : List[Any] ) -> Dict:
return self.tokenizer.decode(*_A , **_A )
@property
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
__magic_name__ : List[str] = self.tokenizer.model_input_names
__magic_name__ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _A , )
return self.image_processor_class
| 561
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase :Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase :Optional[Any] = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Optional[int] = """instructblip_vision_model"""
def __init__( self : List[Any] , _A : Dict=1408 , _A : Union[str, Any]=6144 , _A : Optional[int]=39 , _A : Optional[int]=16 , _A : Optional[int]=224 , _A : Any=14 , _A : Optional[int]="gelu" , _A : str=1E-6 , _A : str=0.0 , _A : str=1E-10 , _A : Optional[Any]=True , **_A : List[Any] , ) -> Dict:
super().__init__(**_A )
__magic_name__ : Optional[int] = hidden_size
__magic_name__ : int = intermediate_size
__magic_name__ : List[Any] = num_hidden_layers
__magic_name__ : int = num_attention_heads
__magic_name__ : Any = patch_size
__magic_name__ : Tuple = image_size
__magic_name__ : int = initializer_range
__magic_name__ : str = attention_dropout
__magic_name__ : int = layer_norm_eps
__magic_name__ : Optional[int] = hidden_act
__magic_name__ : Tuple = qkv_bias
@classmethod
def __lowerCAmelCase ( cls : List[Any] , _A : Union[str, os.PathLike] , **_A : Union[str, Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_A )
__magic_name__ , __magic_name__ : Union[str, Any] = cls.get_config_dict(_A , **_A )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__magic_name__ : Dict = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_A , **_A )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Tuple = """instructblip_qformer"""
def __init__( self : Dict , _A : Dict=30522 , _A : List[str]=768 , _A : Tuple=12 , _A : List[Any]=12 , _A : Optional[int]=3072 , _A : Optional[Any]="gelu" , _A : Tuple=0.1 , _A : Any=0.1 , _A : int=512 , _A : Tuple=0.02 , _A : Optional[Any]=1E-12 , _A : List[Any]=0 , _A : Tuple="absolute" , _A : Dict=2 , _A : Tuple=1408 , **_A : int , ) -> Optional[int]:
super().__init__(pad_token_id=_A , **_A )
__magic_name__ : Any = vocab_size
__magic_name__ : str = hidden_size
__magic_name__ : Optional[int] = num_hidden_layers
__magic_name__ : str = num_attention_heads
__magic_name__ : str = hidden_act
__magic_name__ : List[str] = intermediate_size
__magic_name__ : List[str] = hidden_dropout_prob
__magic_name__ : Tuple = attention_probs_dropout_prob
__magic_name__ : List[Any] = max_position_embeddings
__magic_name__ : Union[str, Any] = initializer_range
__magic_name__ : List[str] = layer_norm_eps
__magic_name__ : Union[str, Any] = position_embedding_type
__magic_name__ : Any = cross_attention_frequency
__magic_name__ : int = encoder_hidden_size
@classmethod
def __lowerCAmelCase ( cls : int , _A : Union[str, os.PathLike] , **_A : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_A )
__magic_name__ , __magic_name__ : str = cls.get_config_dict(_A , **_A )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__magic_name__ : Union[str, Any] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_A , **_A )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : int = """instructblip"""
A_ : Any = True
def __init__( self : int , _A : Optional[int]=None , _A : List[str]=None , _A : Union[str, Any]=None , _A : Any=32 , **_A : int ) -> Any:
super().__init__(**_A )
if vision_config is None:
__magic_name__ : Any = {}
            logger.info('vision_config is None. Initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
__magic_name__ : Union[str, Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
__magic_name__ : List[str] = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
__magic_name__ : Union[str, Any] = InstructBlipVisionConfig(**_A )
__magic_name__ : str = InstructBlipQFormerConfig(**_A )
__magic_name__ : int = text_config['model_type'] if 'model_type' in text_config else 'opt'
__magic_name__ : Tuple = CONFIG_MAPPING[text_model_type](**_A )
__magic_name__ : Optional[Any] = self.text_config.tie_word_embeddings
__magic_name__ : int = self.text_config.is_encoder_decoder
__magic_name__ : List[Any] = num_query_tokens
__magic_name__ : Tuple = self.vision_config.hidden_size
__magic_name__ : int = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__magic_name__ : int = 1.0
__magic_name__ : List[Any] = 0.02
@classmethod
def __lowerCAmelCase ( cls : str , _A : InstructBlipVisionConfig , _A : InstructBlipQFormerConfig , _A : PretrainedConfig , **_A : int , ) -> Union[str, Any]:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_A , )
def __lowerCAmelCase ( self : List[Any] ) -> int:
__magic_name__ : int = copy.deepcopy(self.__dict__ )
__magic_name__ : str = self.vision_config.to_dict()
__magic_name__ : List[str] = self.qformer_config.to_dict()
__magic_name__ : Tuple = self.text_config.to_dict()
__magic_name__ : str = self.__class__.model_type
return output
| 561
| 1
|
"""simple docstring"""
def lowercase_ ( _lowerCamelCase: str , _lowerCamelCase: str ) -> bool:
'''simple docstring'''
__lowerCamelCase : Optional[int] = len(_lowerCamelCase ) + 1
__lowerCamelCase : int = len(_lowerCamelCase ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
__lowerCamelCase : Union[str, Any] = [[0 for i in range(_lowerCamelCase )] for j in range(_lowerCamelCase )]
    # since a string of zero length matches a pattern of zero length
__lowerCamelCase : Optional[Any] = 1
    # since a pattern of zero length will never match a string of non-zero length
for i in range(1 , _lowerCamelCase ):
__lowerCamelCase : Optional[int] = 0
    # since a string of zero length can still match a pattern in which every
    # character is followed by '*' (each "x*" may match zero occurrences)
for j in range(1 , _lowerCamelCase ):
__lowerCamelCase : List[Any] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , _lowerCamelCase ):
for j in range(1 , _lowerCamelCase ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
__lowerCamelCase : Tuple = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
__lowerCamelCase : Tuple = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
__lowerCamelCase : Dict = dp[i - 1][j]
else:
__lowerCamelCase : Dict = 0
else:
__lowerCamelCase : Optional[Any] = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
    # inputting the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
__A = '''aab'''
__A = '''c*a*b'''
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"""{input_string} matches the given pattern {pattern}""")
else:
print(F"""{input_string} does not match with the given pattern {pattern}""")
| 366
|
"""simple docstring"""
from functools import lru_cache
def lowercase_ ( _lowerCamelCase: int ) -> set:
'''simple docstring'''
__lowerCamelCase : Optional[Any] = 2
__lowerCamelCase : Tuple = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(_lowerCamelCase )
if n > 1:
factors.add(_lowerCamelCase )
return factors
@lru_cache
def lowercase_ ( _lowerCamelCase: int ) -> int:
'''simple docstring'''
return len(unique_prime_factors(_lowerCamelCase ) )
def lowercase_ ( _lowerCamelCase: list ) -> bool:
'''simple docstring'''
return len(set(_lowerCamelCase ) ) in (0, 1)
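# equality() relies on a list of all-equal elements collapsing to a one-element
# set (an empty list collapses to the empty set, hence the 0 in the check).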
def lowercase_ ( _lowerCamelCase: int ) -> list:
'''simple docstring'''
__lowerCamelCase : str = 2
while True:
# Increment each value of a generated range
__lowerCamelCase : int = [base + i for i in range(_lowerCamelCase )]
        # Run elements through our unique_prime_factors function
# Append our target number to the end.
__lowerCamelCase : Dict = [upf_len(_lowerCamelCase ) for x in group]
checker.append(_lowerCamelCase )
# If all numbers in the list are equal, return the group variable.
if equality(_lowerCamelCase ):
return group
# Increment our base variable by 1
base += 1
def lowercase_ ( _lowerCamelCase: int = 4 ) -> int:
'''simple docstring'''
__lowerCamelCase : Any = run(_lowerCamelCase )
return results[0] if len(_lowerCamelCase ) else None
if __name__ == "__main__":
print(solution())
| 366
| 1
|
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class lowercase_ ( ctypes.Structure ):
"""simple docstring"""
lowerCamelCase_ = [('''size''', ctypes.c_int), ('''visible''', ctypes.c_byte)]
def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]:
if os.name == "nt":
_SCREAMING_SNAKE_CASE = CursorInfo()
_SCREAMING_SNAKE_CASE = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(__A , ctypes.byref(__A ) )
_SCREAMING_SNAKE_CASE = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(__A , ctypes.byref(__A ) )
elif os.name == "posix":
sys.stdout.write("\033[?25l" )
sys.stdout.flush()
def SCREAMING_SNAKE_CASE_ ( ) -> Dict:
if os.name == "nt":
_SCREAMING_SNAKE_CASE = CursorInfo()
_SCREAMING_SNAKE_CASE = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(__A , ctypes.byref(__A ) )
_SCREAMING_SNAKE_CASE = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(__A , ctypes.byref(__A ) )
elif os.name == "posix":
sys.stdout.write("\033[?25h" )
sys.stdout.flush()
@contextmanager
def SCREAMING_SNAKE_CASE_ ( ) -> List[str]:
try:
hide_cursor()
yield
finally:
show_cursor()
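# A minimal usage sketch; the context manager's name is obfuscated above, so
# `hidden_cursor` below is a hypothetical stand-in for whatever it is bound to:
#
#   with hidden_cursor():
#       run_long_task()  # the cursor is restored even if this raises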
| 418
|
'''simple docstring'''
from collections import defaultdict
from math import gcd
def SCREAMING_SNAKE_CASE_ ( __A : int = 1_50_00_00 ) -> int:
_SCREAMING_SNAKE_CASE = defaultdict(__A )
_SCREAMING_SNAKE_CASE = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , __A , 2 ):
if gcd(__A , __A ) > 1:
continue
_SCREAMING_SNAKE_CASE = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(__A , limit + 1 , __A ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
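# Euclid's formula a = m^2 - n^2, b = 2mn, c = m^2 + n^2 gives perimeter
# 2m(m + n) for coprime m > n of opposite parity; stepping through multiples of
# each primitive perimeter counts every right triangle sharing it, and the sum
# keeps only perimeters that occur exactly once.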
if __name__ == "__main__":
print(f'''{solution() = }''')
| 418
| 1
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : str ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
F'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='utf-8' , check=SCREAMING_SNAKE_CASE__ , )
assert hasattr(self , 'env' )
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str]=1 ):
"""simple docstring"""
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'{self.env.base_job_name}-single' , instance_count=SCREAMING_SNAKE_CASE__ , instance_type=self.instance_type , debugger_hook_config=SCREAMING_SNAKE_CASE__ , hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='py36' , )
def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
TrainingJobAnalytics(SCREAMING_SNAKE_CASE__ ).export_csv(F'{self.env.test_path}/{job_name}_metrics.csv' )
def __lowerCAmelCase ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = self.create_estimator()
# run training
estimator.fit()
# result dataframe
UpperCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCamelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'{estimator.latest_training_job.name}.json' , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , SCREAMING_SNAKE_CASE__ )
| 715
|
import cva
import numpy as np
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
if k in (0.04, 0.06):
UpperCamelCase = k
UpperCamelCase = window_size
else:
raise ValueError('invalid k value' )
def __str__( self : Any ):
"""simple docstring"""
return str(self.k )
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
UpperCamelCase = cva.imread(SCREAMING_SNAKE_CASE__ , 0 )
UpperCamelCase , UpperCamelCase = img.shape
UpperCamelCase = []
UpperCamelCase = img.copy()
UpperCamelCase = cva.cvtColor(SCREAMING_SNAKE_CASE__ , cva.COLOR_GRAY2RGB )
UpperCamelCase , UpperCamelCase = np.gradient(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = dx**2
UpperCamelCase = dy**2
UpperCamelCase = dx * dy
UpperCamelCase = 0.04
UpperCamelCase = self.window_size // 2
for y in range(SCREAMING_SNAKE_CASE__ , h - offset ):
for x in range(SCREAMING_SNAKE_CASE__ , w - offset ):
UpperCamelCase = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
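                # Harris response for this window: r = det(M) - k * trace(M)^2,
                # where M is the 2x2 structure tensor [[wxx, wxy], [wxy, wyy]]
                # built from the summed gradient products.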
UpperCamelCase = (wxx * wyy) - (wxy**2)
UpperCamelCase = wxx + wyy
UpperCamelCase = det - k * (trace**2)
                # Threshold on the Harris response; tune this value to keep more or fewer corners.
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
if __name__ == "__main__":
_snake_case = HarrisCorner(0.04, 3)
_snake_case , _snake_case = edge_detect.detect('''path_to_image''')
cva.imwrite('''detect.png''', color_img)
| 170
| 0
|
import math
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> list:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = [True] * n
SCREAMING_SNAKE_CASE_ : Any = False
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Optional[int] = True
for i in range(3 , int(n**0.5 + 1 ) , 2 ):
SCREAMING_SNAKE_CASE_ : Dict = i * 2
while index < n:
SCREAMING_SNAKE_CASE_ : Any = False
SCREAMING_SNAKE_CASE_ : Any = index + i
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [2]
for i in range(3 , lowerCamelCase_ , 2 ):
if is_prime[i]:
primes.append(lowerCamelCase_ )
return primes
def __UpperCAmelCase ( lowerCamelCase_ : int = 99_99_66_66_33_33 ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = math.floor(math.sqrt(lowerCamelCase_ ) ) + 1_00
SCREAMING_SNAKE_CASE_ : Tuple = prime_sieve(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : int = 0
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE_ : Dict = primes[prime_index]
while (last_prime**2) <= limit:
SCREAMING_SNAKE_CASE_ : Optional[int] = primes[prime_index + 1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = last_prime**2
SCREAMING_SNAKE_CASE_ : Tuple = next_prime**2
# Get numbers divisible by lps(current)
SCREAMING_SNAKE_CASE_ : List[str] = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
SCREAMING_SNAKE_CASE_ : Optional[Any] = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
SCREAMING_SNAKE_CASE_ : Any = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
SCREAMING_SNAKE_CASE_ : Optional[Any] = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 105
|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
UpperCamelCase__ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class lowerCAmelCase_ ( lowerCamelCase_ ):
def __init__( self ,*snake_case__ ,**snake_case__ ):
super().__init__(*snake_case__ ,**snake_case__ )
self.check_model_type(snake_case__ )
def snake_case ( self ,snake_case__=None ,snake_case__=None ,snake_case__=None ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}, {}
if padding is not None:
SCREAMING_SNAKE_CASE_ : Any = padding
if truncation is not None:
SCREAMING_SNAKE_CASE_ : Tuple = truncation
if top_k is not None:
SCREAMING_SNAKE_CASE_ : int = top_k
return preprocess_params, {}, postprocess_params
def __call__( self ,snake_case__ ,snake_case__ = None ,**snake_case__ ):
if isinstance(snake_case__ ,(Image.Image, str) ) and isinstance(snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'image': image, 'question': question}
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = image
SCREAMING_SNAKE_CASE_ : List[Any] = super().__call__(snake_case__ ,**snake_case__ )
return results
def snake_case ( self ,snake_case__ ,snake_case__=False ,snake_case__=False ):
SCREAMING_SNAKE_CASE_ : List[str] = load_image(inputs['image'] )
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer(
inputs['question'] ,return_tensors=self.framework ,padding=snake_case__ ,truncation=snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.image_processor(images=snake_case__ ,return_tensors=self.framework )
model_inputs.update(snake_case__ )
return model_inputs
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model(**snake_case__ )
return model_outputs
def snake_case ( self ,snake_case__ ,snake_case__=5 ):
if top_k > self.model.config.num_labels:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model.config.num_labels
if self.framework == "pt":
SCREAMING_SNAKE_CASE_ : Any = model_outputs.logits.sigmoid()[0]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = probs.topk(snake_case__ )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
SCREAMING_SNAKE_CASE_ : Optional[int] = scores.tolist()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(snake_case__ ,snake_case__ )]
| 105
| 1
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 6_5_0, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
},
] )
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=__UpperCAmelCase , )
assert hasattr(self , '''env''' )
def lowerCamelCase ( self , __UpperCAmelCase=1 ):
'''simple docstring'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=__UpperCAmelCase , instance_type=self.instance_type , debugger_hook_config=__UpperCAmelCase , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
TrainingJobAnalytics(__UpperCAmelCase ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
def lowerCamelCase ( self ):
'''simple docstring'''
# create estimator
__lowerCamelCase = self.create_estimator()
# run training
estimator.fit()
# result dataframe
__lowerCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__lowerCamelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
__lowerCamelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowerCamelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , __UpperCAmelCase )
| 622
|
def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str = " " ):
__lowerCamelCase = []
__lowerCamelCase = 0
for index, char in enumerate(_UpperCamelCase ):
if char == separator:
split_words.append(string[last_index:index] )
__lowerCamelCase = index + 1
elif index + 1 == len(_UpperCamelCase ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 622
| 1
|
'''simple docstring'''
def _lowerCAmelCase (_lowercase ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ):
raise ValueError("check_bouncy() accepts only integer arguments" )
a__ = str(_snake_case )
a__ = "".join(sorted(_snake_case ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
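# A number is "bouncy" when its digits are neither monotonically increasing nor
# decreasing; comparing the digit string with its sorted form in both directions
# checks exactly that (e.g. 155349 is bouncy, 134468 and 66420 are not).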
def _lowerCAmelCase (_lowercase = 99 ):
"""simple docstring"""
if not 0 < percent < 1_00:
raise ValueError("solution() only accepts values from 0 to 100" )
a__ = 0
a__ = 1
while True:
if check_bouncy(_snake_case ):
bouncy_num += 1
if (bouncy_num / num) * 1_00 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"{solution(99)}")
| 331
|
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
_SCREAMING_SNAKE_CASE = TypeVar("T")
class SCREAMING_SNAKE_CASE_ ( Generic[T] ):
"""simple docstring"""
def __init__( self :List[Any], snake_case :bool = True):
"""simple docstring"""
_lowercase ={} # dictionary of lists
_lowercase =directed
def UpperCamelCase__ ( self :Optional[Any], snake_case :T, snake_case :T):
"""simple docstring"""
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(snake_case)
self.adj_list[destination_vertex].append(snake_case)
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(snake_case)
_lowercase =[source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(snake_case)
_lowercase =[destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
else:
_lowercase =[destination_vertex]
_lowercase =[source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(snake_case)
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(snake_case)
_lowercase =[]
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
_lowercase =[destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex.
else:
_lowercase =[destination_vertex]
_lowercase =[]
return self
def __repr__( self :Optional[int]):
"""simple docstring"""
return pformat(self.adj_list)
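# A minimal usage sketch; the rendered method and class names above are
# obfuscated, so `GraphAdjacencyList` and `add_edge` below are assumed
# stand-ins for what they are bound to:
#
#   g = GraphAdjacencyList(directed=False)
#   g.add_edge(1, 2).add_edge(2, 3)  # chaining works: add_edge returns self
#   print(g)                         # {1: [2], 2: [1, 3], 3: [2]}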
| 181
| 0
|
'''simple docstring'''
def _UpperCamelCase ( _a : float , _a : float ):
"""simple docstring"""
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"""{price_plus_tax(1_0_0, 0.25) = }""")
print(F"""{price_plus_tax(125.50, 0.05) = }""")
| 287
|
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCamelCase ( _a : int , _a : Tuple , _a : Dict=None ):
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
__UpperCamelCase : List[Any] = nn.Parameter(_a )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
__UpperCamelCase : Dict = nn.Parameter(_a )
def _UpperCamelCase ( _a : Tuple , _a : str , _a : List[str] ):
"""simple docstring"""
__UpperCamelCase : List[Any] = np.asarray(weights[0] )
__UpperCamelCase : str = np.asarray(weights[1] )
__UpperCamelCase : Optional[int] = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(_a ).transpose(1 , 2 ).contiguous().view(-1 , _a ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(_a ).transpose(1 , 2 ).contiguous().view(-1 , _a ) , )
set_param(
torch_layer.output.dense , torch.tensor(_a ).view(-1 , _a ).contiguous().transpose(0 , 1 ) , )
def _UpperCamelCase ( _a : Union[str, Any] , _a : Any , _a : List[Any] ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = np.asarray(weights[0] )
__UpperCamelCase : Tuple = np.asarray(weights[1] )
__UpperCamelCase : Tuple = np.asarray(weights[2] )
__UpperCamelCase : Optional[int] = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(_a ).transpose(1 , 2 ).contiguous().view(-1 , _a ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(_a ).transpose(1 , 2 ).contiguous().view(-1 , _a ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(_a ).transpose(1 , 2 ).contiguous().view(-1 , _a ) , )
set_param(
torch_layer.output.dense , torch.tensor(_a ).view(-1 , _a ).contiguous().transpose(0 , 1 ) , )
def _UpperCamelCase ( _a : Dict , _a : str , _a : List[Any] ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = weights[0][0][0]
__UpperCamelCase : Tuple = np.asarray(layer_norm_a[0] )
__UpperCamelCase : List[str] = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(_a ) , torch.tensor(_a ) , )
# lsh weights + output
__UpperCamelCase : int = weights[0][1]
if len(_a ) < 4:
set_layer_weights_in_torch_lsh(_a , torch_block.attention , _a )
else:
set_layer_weights_in_torch_local(_a , torch_block.attention , _a )
    # intermediate weights
__UpperCamelCase : int = weights[2][0][1][2]
# Chunked Feed Forward
if len(_a ) == 4:
__UpperCamelCase : Optional[Any] = intermediate_weights[2]
# layernorm 2
__UpperCamelCase : int = np.asarray(intermediate_weights[0][0] )
__UpperCamelCase : Tuple = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(_a ) , torch.tensor(_a ) , )
# intermediate dense
__UpperCamelCase : Optional[Any] = np.asarray(intermediate_weights[1][0] )
__UpperCamelCase : int = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(_a ).transpose(0 , 1 ).contiguous() , torch.tensor(_a ) , )
# intermediate out
__UpperCamelCase : Dict = np.asarray(intermediate_weights[4][0] )
__UpperCamelCase : List[str] = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(_a ).transpose(0 , 1 ).contiguous() , torch.tensor(_a ) , )
def _UpperCamelCase ( _a : int , _a : str , _a : Union[str, Any] ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = torch_model.reformer
# word embeds
__UpperCamelCase : Tuple = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(_a ) , )
if isinstance(weights[3] , _a ):
__UpperCamelCase : Dict = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
__UpperCamelCase : str = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f"""{position_embeddings[emb_idx]} emb does not match"""
__UpperCamelCase : Tuple = nn.Parameter(torch.tensor(_a ) )
__UpperCamelCase : int = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
_a ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
__UpperCamelCase : Any = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(_a , _a , _a )
# output layer norm
__UpperCamelCase : Optional[Any] = np.asarray(weights[7][0] )
__UpperCamelCase : Any = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(_a ) , torch.tensor(_a ) , )
# output embeddings
__UpperCamelCase : List[Any] = np.asarray(weights[9][0] )
__UpperCamelCase : Union[str, Any] = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(_a ).transpose(0 , 1 ).contiguous() , torch.tensor(_a ) , )
def _UpperCamelCase ( _a : Optional[int] , _a : List[Any] , _a : Optional[Any] ):
"""simple docstring"""
__UpperCamelCase : str = ReformerConfig.from_json_file(_a )
print(f"""Building PyTorch model from configuration: {config}""" )
__UpperCamelCase : List[str] = ReformerModelWithLMHead(_a )
with open(_a , 'rb' ) as f:
__UpperCamelCase : Tuple = pickle.load(_a )['weights']
set_model_weights_in_torch(_a , _a , config.hidden_size )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , _a )
if __name__ == "__main__":
a= argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a= parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 287
| 1
|
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowercase__ ( lowerCamelCase ):
# A local function to see if a dot lands in the circle.
def is_in_circle(lowerCamelCase, lowerCamelCase ) -> bool:
_SCREAMING_SNAKE_CASE : Union[str, Any] = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
_SCREAMING_SNAKE_CASE : Union[str, Any] = mean(
int(is_in_circle(uniform(-1.0, 1.0 ), uniform(-1.0, 1.0 ) ) )
for _ in range(lowerCamelCase ) )
    # The ratio of the circle's area to the square's area is pi/4.
_SCREAMING_SNAKE_CASE : List[str] = proportion * 4
print(f"""The estimated value of pi is {pi_estimate}""" )
print(f"""The numpy value of pi is {pi}""" )
print(f"""The total error is {abs(pi - pi_estimate )}""" )
def lowercase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase = 0.0, lowerCamelCase = 1.0, ):
return mean(
function_to_integrate(uniform(lowerCamelCase, lowerCamelCase ) ) for _ in range(lowerCamelCase ) ) * (max_value - min_value)
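# The estimator above is plain Monte Carlo integration: for U ~ Uniform(a, b),
# E[f(U)] * (b - a) equals the integral of f over [a, b], so the sample mean of
# f(U) scaled by the interval length is an unbiased estimate of that integral.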
def lowercase__ ( lowerCamelCase, lowerCamelCase = 0.0, lowerCamelCase = 1.0 ):
def identity_function(lowerCamelCase ) -> float:
return x
_SCREAMING_SNAKE_CASE : Any = area_under_curve_estimator(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = (max_value * max_value - min_value * min_value) / 2
print('******************' )
print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
print(f"""Estimated value is {estimated_value}""" )
print(f"""Expected value is {expected_value}""" )
print(f"""Total error is {abs(estimated_value - expected_value )}""" )
print('******************' )
def lowercase__ ( lowerCamelCase ):
def function_to_integrate(lowerCamelCase ) -> float:
return sqrt(4.0 - x * x )
_SCREAMING_SNAKE_CASE : str = area_under_curve_estimator(
lowerCamelCase, lowerCamelCase, 0.0, 2.0 )
print('******************' )
print('Estimating pi using area_under_curve_estimator' )
print(f"""Estimated value is {estimated_value}""" )
print(f"""Expected value is {pi}""" )
print(f"""Total error is {abs(estimated_value - pi )}""" )
print('******************' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 621
|
"""simple docstring"""
def lowercase__ ( lowerCamelCase, lowerCamelCase ):
return abs(lowerCamelCase ) if a == 0 else greatest_common_divisor(b % a, lowerCamelCase )
def lowercase__ ( lowerCamelCase, lowerCamelCase ):
    while y:  # --> when y becomes 0, the loop terminates and x holds the final GCD.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = y, x % y
return abs(lowerCamelCase )
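# Both versions implement Euclid's algorithm via the identity
# gcd(x, y) == gcd(y, x % y), applied until the second argument reaches 0.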
def lowercase__ ( ):
try:
_SCREAMING_SNAKE_CASE : Union[str, Any] = input('Enter two integers separated by comma (,): ' ).split(',' )
_SCREAMING_SNAKE_CASE : Dict = int(nums[0] )
_SCREAMING_SNAKE_CASE : Dict = int(nums[1] )
print(
f"""greatest_common_divisor({num_a}, {num_a}) = """
f"""{greatest_common_divisor(lowerCamelCase, lowerCamelCase )}""" )
print(f"""By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(lowerCamelCase, lowerCamelCase )}""" )
except (IndexError, UnboundLocalError, ValueError):
print('Wrong input' )
if __name__ == "__main__":
main()
| 621
| 1
|
from collections import defaultdict
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> bool:
snake_case__ = first_str.lower().strip()
snake_case__ = second_str.lower().strip()
# Remove whitespace
snake_case__ = first_str.replace(''' ''' , '''''' )
snake_case__ = second_str.replace(''' ''' , '''''' )
# Strings of different lengths are not anagrams
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
return False
# Default values for count should be 0
snake_case__ = defaultdict(__lowerCAmelCase )
    # For each character in the input strings, increment the count for the
    # first string and decrement it for the second
for i in range(len(__lowerCAmelCase ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
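# One pass suffices: each position increments the count for the first string's
# character and decrements it for the second's, so (given equal lengths) the
# inputs are anagrams iff every character's net count is zero.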
if __name__ == "__main__":
from doctest import testmod
testmod()
lowerCamelCase__ : List[str] = input("""Enter the first string """).strip()
lowerCamelCase__ : Dict = input("""Enter the second string """).strip()
lowerCamelCase__ : int = check_anagrams(input_a, input_b)
print(F"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
| 208
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
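# Non-interactive usage sketch of the Quine-McCluskey pipeline wired together in
# main() above. The 3-variable minterm set {0, 1, 2, 5} is a textbook example;
# the exact strings produced depend on the float-based decimal_to_binary kept
# from the source:
#
#   binary = decimal_to_binary(3, [0.0, 1.0, 2.0, 5.0])
#   prime_implicants = check(binary)
#   chart = prime_implicant_chart(prime_implicants, binary)
#   essential = selection(chart, prime_implicants)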
| 208
| 1
|
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB)
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
        expected_encoding = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="microsoft/speecht5_asr", revision="c5ef64c71905caeccde0e4462ef3f9077224c524", sequences=sequences,
        )
| 198
|
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 198
| 1
|
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 712
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 547
| 0
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 101
|
'''simple docstring'''
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1), Create(cpu_right_col, run_time=1), Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.", font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 5
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, return_dict=None, output_hidden_states=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions,
            )
| 101
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/git-base''': '''https://huggingface.co/microsoft/git-base/resolve/main/config.json''',
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(self, vision_config=None, vocab_size=30522, hidden_size=768, num_hidden_layers=6, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, tie_word_embeddings=False, bos_token_id=101, eos_token_id=102, num_image_with_embedding=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 101
| 1
|
"""simple docstring"""
def upper(word: str) -> str:
    """Convert the entire string to uppercase letters."""
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 169
|
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
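# Minimal construction sketch (every argument above has a default, so this
# yields the stock configuration; the attribute values follow those defaults):
#
#   config = EfficientFormerConfig()
#   config.depths         # -> [3, 2, 6, 4]
#   config.hidden_sizes   # -> [48, 96, 224, 448]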
| 169
| 1
|
"""simple docstring"""
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= int(octet) <= 254 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
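# Behavior sketch (follows directly from the checks above; note the upper bound
# in this implementation is 254, not 255):
#
#   is_ip_v4_address_valid("192.168.0.23")  # -> True
#   is_ip_v4_address_valid("192.256.15.8")  # -> False (octet > 254)
#   is_ip_v4_address_valid("172.100.0.8")   # -> True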
| 713
|
"""simple docstring"""
import base64


def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    return base64.b85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
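# Round-trip sketch (Base85 per RFC 1924, as implemented by base64.b85encode;
# the exact encoded bytes depend on the input):
#
#   base85_decode(base85_encode("some text"))  # -> "some text"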
| 498
| 0
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
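# Hedged sketch of align_with_features (the schema below is illustrative; the
# column names are the class defaults):
#
#   from datasets import ClassLabel, Features, Value
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   template = TextClassification()
#   aligned = template.align_with_features(features)
#   aligned.label_schema["labels"]  # -> ClassLabel(names=['neg', 'pos'])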
| 310
|
'''simple docstring'''
def is_balanced(s):
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])

        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
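# Quick check of is_balanced (traced from the stack logic above):
#
#   is_balanced("{[()]}")  # -> True
#   is_balanced("{[(])}")  # -> False ("(" would be closed by "]")
#   is_balanced("((")      # -> False (stack not empty at the end)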
| 310
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/mbart-large-en-ro""": 1_024,
"""facebook/mbart-large-cc25""": 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
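# Hedged usage sketch ("facebook/mbart-large-en-ro" is one of the checkpoints
# listed above; the exact token ids depend on the model's vocabulary):
#
#   tokenizer = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro")
#   tokenizer.src_lang = "en_XX"
#   batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   # per set_src_lang_special_tokens above, input_ids end with [eos_token_id, en_XX code id]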
| 493
|
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 493
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
if "model" in orig_key:
snake_case_ = orig_key.replace('''model.''', '''''' )
if "norm1" in orig_key:
snake_case_ = orig_key.replace('''norm1''', '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
snake_case_ = orig_key.replace('''norm2''', '''output.LayerNorm''' )
if "norm" in orig_key:
snake_case_ = orig_key.replace('''norm''', '''LayerNorm''' )
if "transformer" in orig_key:
snake_case_ = orig_key.split('''.''' )[0].split('''_''' )[-1]
snake_case_ = orig_key.replace(F"transformer_{layer_num}", F"encoder.layer.{layer_num}" )
if "mha.attn" in orig_key:
snake_case_ = orig_key.replace('''mha.attn''', '''attention.self''' )
if "mha" in orig_key:
snake_case_ = orig_key.replace('''mha''', '''attention''' )
if "W_q" in orig_key:
snake_case_ = orig_key.replace('''W_q''', '''self.query''' )
if "W_k" in orig_key:
snake_case_ = orig_key.replace('''W_k''', '''self.key''' )
if "W_v" in orig_key:
snake_case_ = orig_key.replace('''W_v''', '''self.value''' )
if "ff1" in orig_key:
snake_case_ = orig_key.replace('''ff1''', '''intermediate.dense''' )
if "ff2" in orig_key:
snake_case_ = orig_key.replace('''ff2''', '''output.dense''' )
if "ff" in orig_key:
snake_case_ = orig_key.replace('''ff''', '''output.dense''' )
if "mlm_class" in orig_key:
snake_case_ = orig_key.replace('''mlm.mlm_class''', '''cls.predictions.decoder''' )
if "mlm" in orig_key:
snake_case_ = orig_key.replace('''mlm''', '''cls.predictions.transform''' )
if "cls" not in orig_key:
snake_case_ = '''yoso.''' + orig_key
return orig_key
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> int:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
snake_case_ = orig_state_dict.pop(lowerCAmelCase__ )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
snake_case_ = val
snake_case_ = orig_state_dict['''cls.predictions.decoder.bias''']
snake_case_ = torch.arange(lowerCAmelCase__ ).expand((1, -1) ) + 2
return orig_state_dict
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
snake_case_ = torch.load(lowerCAmelCase__, map_location='''cpu''' )['''model_state_dict''']
snake_case_ = YosoConfig.from_json_file(lowerCAmelCase__ )
snake_case_ = YosoForMaskedLM(lowerCAmelCase__ )
snake_case_ = convert_checkpoint_helper(config.max_position_embeddings, lowerCAmelCase__ )
print(model.load_state_dict(lowerCAmelCase__ ) )
model.eval()
model.save_pretrained(lowerCAmelCase__ )
print(F"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}" )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a : int = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
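# Editorial sketch (not part of the original script): a tiny, self-contained illustration
# of the ordered chained-replacement idea the rename helper above implements.
def _demo_rename(key: str) -> str:
    for old, new in [("model.", ""), ("mha", "attention"), ("W_q", "self.query")]:
        key = key.replace(old, new)
    if "cls" not in key:
        key = "yoso." + key
    return key

print(_demo_rename("model.mha.W_q.weight"))  # -> yoso.attention.self.query.weight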
| 640
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
A_ = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
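# Editorial note (not part of the original module): with _LazyModule, submodules are only
# imported on first attribute access, so e.g.
#     from transformers.models import gpt_neo
#     gpt_neo.GPTNeoConfig  # the real import of configuration_gpt_neo happens here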
| 29
| 0
|
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
a__ : Optional[int] = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
a__ : Optional[Any] = logging.get_logger(__name__)
class lowercase_ ( a__ ):
__UpperCAmelCase = 'maskformer'
__UpperCAmelCase = {'hidden_size': 'mask_feature_size'}
__UpperCAmelCase = ['resnet', 'swin']
__UpperCAmelCase = ['detr']
def __init__( self , a = 2_56 , a = 2_56 , a = 0.1 , a = False , a = None , a = None , a = 0.02 , a = 1.0 , a = 1.0 , a = 1.0 , a = 20.0 , a = None , **a , ):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
UpperCamelCase__ = SwinConfig(
image_size=3_84 , in_channels=3 , patch_size=4 , embed_dim=1_28 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(a , a ):
UpperCamelCase__ = backbone_config.pop("model_type" )
UpperCamelCase__ = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase__ = config_class.from_dict(a )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
f'''Supported model types: {','.join(self.backbones_supported )}''' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
UpperCamelCase__ = DetrConfig()
else:
# verify that the decoder is supported
UpperCamelCase__ = (
decoder_config.pop("model_type" ) if isinstance(a , a ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'''Transformer Decoder {decoder_type} not supported, please use one of'''
f''' {','.join(self.decoders_supported )}''' )
if isinstance(a , a ):
UpperCamelCase__ = CONFIG_MAPPING[decoder_type]
UpperCamelCase__ = config_class.from_dict(a )
UpperCamelCase__ = backbone_config
UpperCamelCase__ = decoder_config
# main feature dimension for the model
UpperCamelCase__ = fpn_feature_size
UpperCamelCase__ = mask_feature_size
# initializer
UpperCamelCase__ = init_std
UpperCamelCase__ = init_xavier_std
# Hungarian matcher && loss
UpperCamelCase__ = cross_entropy_weight
UpperCamelCase__ = dice_weight
UpperCamelCase__ = mask_weight
UpperCamelCase__ = use_auxiliary_loss
UpperCamelCase__ = no_object_weight
UpperCamelCase__ = output_auxiliary_logits
UpperCamelCase__ = self.decoder_config.encoder_attention_heads
UpperCamelCase__ = self.decoder_config.num_hidden_layers
super().__init__(**a )
@classmethod
def __a ( cls , a , a , **a ):
return cls(
backbone_config=a , decoder_config=a , **a , )
def __a ( self ):
UpperCamelCase__ = copy.deepcopy(self.__dict__ )
UpperCamelCase__ = self.backbone_config.to_dict()
UpperCamelCase__ = self.decoder_config.to_dict()
UpperCamelCase__ = self.__class__.model_type
return output
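# Editorial usage sketch (not part of the original file); the public counterpart of this
# class is `transformers.MaskFormerConfig`. Requires `transformers` to be installed.
if __name__ == "__main__":
    from transformers import DetrConfig, MaskFormerConfig, SwinConfig

    cfg = MaskFormerConfig.from_backbone_and_decoder_configs(
        backbone_config=SwinConfig(), decoder_config=DetrConfig()
    )
    serialized = cfg.to_dict()  # nested backbone_config / decoder_config dicts
    assert serialized["model_type"] == "maskformer"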
| 701
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
a__ : Any = logging.get_logger(__name__)
class lowercase_ ( a__ ):
def __init__( self , *a , **a ):
warnings.warn(
"The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use BeitImageProcessor instead." , a , )
super().__init__(*a , **a )
| 223
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class lowercase_ ( A ):
__lowerCamelCase = "lxmert"
__lowerCamelCase = {}
def __init__( self , __A=30_522 , __A=768 , __A=12 , __A=9_500 , __A=1_600 , __A=400 , __A=3_072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.02 , __A=1e-1_2 , __A=9 , __A=5 , __A=5 , __A=2_048 , __A=4 , __A=6.67 , __A=True , __A=True , __A=True , __A=True , __A=True , __A=True , __A=True , **__A , ) -> Dict:
SCREAMING_SNAKE_CASE_ : str =vocab_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] =hidden_size
SCREAMING_SNAKE_CASE_ : List[str] =num_attention_heads
SCREAMING_SNAKE_CASE_ : Tuple =hidden_act
SCREAMING_SNAKE_CASE_ : Union[str, Any] =intermediate_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] =hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[Any] =max_position_embeddings
SCREAMING_SNAKE_CASE_ : Optional[Any] =type_vocab_size
SCREAMING_SNAKE_CASE_ : str =initializer_range
SCREAMING_SNAKE_CASE_ : List[Any] =layer_norm_eps
SCREAMING_SNAKE_CASE_ : int =num_qa_labels
SCREAMING_SNAKE_CASE_ : int =num_object_labels
SCREAMING_SNAKE_CASE_ : List[str] =num_attr_labels
SCREAMING_SNAKE_CASE_ : List[str] =l_layers
SCREAMING_SNAKE_CASE_ : Optional[int] =x_layers
SCREAMING_SNAKE_CASE_ : int =r_layers
SCREAMING_SNAKE_CASE_ : Any =visual_feat_dim
SCREAMING_SNAKE_CASE_ : Any =visual_pos_dim
SCREAMING_SNAKE_CASE_ : List[Any] =visual_loss_normalizer
SCREAMING_SNAKE_CASE_ : Optional[int] =task_matched
SCREAMING_SNAKE_CASE_ : Optional[Any] =task_mask_lm
SCREAMING_SNAKE_CASE_ : str =task_obj_predict
SCREAMING_SNAKE_CASE_ : Dict =task_qa
SCREAMING_SNAKE_CASE_ : Dict =visual_obj_loss
SCREAMING_SNAKE_CASE_ : List[Any] =visual_attr_loss
SCREAMING_SNAKE_CASE_ : int =visual_feat_loss
SCREAMING_SNAKE_CASE_ : Tuple ={'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**__A )
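# Editorial usage sketch (not part of the original file); the public counterpart is
# `transformers.LxmertConfig`. Note that `num_hidden_layers` is a per-encoder dict here.
if __name__ == "__main__":
    from transformers import LxmertConfig

    cfg = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
    print(cfg.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 9}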
| 443
|
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : int = 1_0_0_0 ) -> int:
SCREAMING_SNAKE_CASE_ : int =2**power
SCREAMING_SNAKE_CASE_ : Tuple =0
while n:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] =r + n % 1_0, n // 1_0
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
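# Editorial cross-check (not part of the original file): the digit sum can also be read
# off the decimal string, e.g. 2**15 == 32768 and 3 + 2 + 7 + 6 + 8 == 26.
def _digit_sum_str(power: int) -> int:
    return sum(int(d) for d in str(2**power))

assert _digit_sum_str(15) == 26
assert _digit_sum_str(1000) == 1366  # Project Euler problem 16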
| 443
| 1
|
'''simple docstring'''
UpperCamelCase_ = [
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 715
|
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[Any]=None ):
'''simple docstring'''
# Input as list
lowercase : Optional[int] =list(poly_a or [0] )[:]
lowercase : Optional[Any] =list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
lowercase : Any =len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
lowercase : Dict =len(self.polyB )
# Add 0 to make lengths equal a power of 2
lowercase : int =int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
lowercase : Union[str, Any] =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
lowercase : Tuple =self.__multiply()
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple ):
'''simple docstring'''
lowercase : Union[str, Any] =[[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(UpperCAmelCase__ ) <= 1:
return dft[0]
#
lowercase : Any =self.c_max_length // 2
while next_ncol > 0:
lowercase : Optional[int] =[[] for i in range(UpperCAmelCase__ )]
lowercase : Tuple =self.root**next_ncol
# First half of next step
lowercase : str =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase__ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
lowercase : int =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase__ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
lowercase : Dict =new_dft
lowercase : Tuple =next_ncol // 2
return dft[0]
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Any =self.__dft('''A''' )
lowercase : Any =self.__dft('''B''' )
lowercase : Optional[int] =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
lowercase : Optional[int] =2
while next_ncol <= self.c_max_length:
lowercase : Optional[int] =[[] for i in range(UpperCAmelCase__ )]
lowercase : List[str] =self.root ** (next_ncol // 2)
lowercase : Optional[int] =1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
lowercase : List[Any] =new_inverse_c
next_ncol *= 2
# Unpack
lowercase : Tuple =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Any ):
'''simple docstring'''
lowercase : Any ='''A = ''' + ''' + '''.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyA[: self.len_A] ) )
lowercase : Tuple ='''B = ''' + ''' + '''.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyB[: self.len_B] ) )
lowercase : List[str] ='''A*B = ''' + ''' + '''.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.product ) )
return F'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
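# Editorial cross-check (not part of the original file): a naive O(n*m) convolution that
# the FFT-based multiplication above should agree with, e.g.
# (1 + 2x + 3x^2) * (4 + 5x) == 4 + 13x + 22x^2 + 15x^3.
def _naive_poly_mul(a: list, b: list) -> list:
    out = [0] * (len(a) + len(b) - 1)
    for i, ca in enumerate(a):
        for j, cb in enumerate(b):
            out[i + j] += ca * cb
    return out

assert _naive_poly_mul([1, 2, 3], [4, 5]) == [4, 13, 22, 15]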
| 88
| 0
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
_snake_case = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCamelCase ( datasets.BuilderConfig ):
UpperCamelCase : int = 10_000
UpperCamelCase : Optional[List[str]] = None
UpperCamelCase : Optional[datasets.Features] = None
class UpperCamelCase ( datasets.ArrowBasedBuilder ):
UpperCamelCase : str = ParquetConfig
def _lowercase ( self : List[str] ) -> Union[str, Any]:
return datasets.DatasetInfo(features=self.config.features )
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : Optional[int] ) -> Optional[Any]:
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_a : str = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCAmelCase__ , (str, list, tuple) ):
_a : Dict = data_files
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_a : int = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_a : Tuple = [dl_manager.iter_files(UpperCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
_a : Any = []
for split_name, files in data_files.items():
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_a : Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_a : List[str] = [dl_manager.iter_files(UpperCAmelCase__ ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(UpperCAmelCase__ ):
with open(UpperCAmelCase__ , """rb""" ) as f:
_a : Union[str, Any] = datasets.Features.from_arrow_schema(pq.read_schema(UpperCAmelCase__ ) )
break
splits.append(datasets.SplitGenerator(name=UpperCAmelCase__ , gen_kwargs={"""files""": files} ) )
return splits
def _lowercase ( self : List[str] , UpperCAmelCase__ : pa.Table ) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_a : Dict = table_cast(UpperCAmelCase__ , self.info.features.arrow_schema )
return pa_table
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : Optional[int] ) -> Any:
_a : Dict = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__ ) ):
with open(UpperCAmelCase__ , """rb""" ) as f:
_a : Any = pq.ParquetFile(UpperCAmelCase__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
_a : Optional[int] = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"""{file_idx}_{batch_idx}""", self._cast_table(UpperCAmelCase__ )
except ValueError as e:
logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__ )}: {e}""" )
raise
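# Editorial, self-contained sketch (not part of the original module) of the read loop in
# _generate_tables above, reusing the pa/pq imports at the top of this file: write a tiny
# parquet file, then stream it back in record batches exactly as the builder does.
import os
import tempfile

with tempfile.TemporaryDirectory() as tmp_dir:
    path = os.path.join(tmp_dir, "demo.parquet")
    pq.write_table(pa.table({"text": ["a", "b", "c"], "label": [0, 1, 0]}), path)
    for batch in pq.ParquetFile(path).iter_batches(batch_size=2, columns=["text"]):
        table = pa.Table.from_batches([batch])  # one pa.Table per yielded batch
        print(table.num_rows, table.column_names)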
| 389
|
"""simple docstring"""
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
_snake_case = get_logger(__name__)
class UpperCamelCase ( enum.Enum ):
UpperCamelCase : str = '''all_checks'''
UpperCamelCase : Any = '''basic_checks'''
UpperCamelCase : Union[str, Any] = '''no_checks'''
class UpperCamelCase ( snake_case_ ):
pass
class UpperCamelCase ( snake_case_ ):
pass
class UpperCamelCase ( snake_case_ ):
pass
class UpperCamelCase ( snake_case_ ):
pass
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ):
'''simple docstring'''
if expected_checksums is None:
logger.info("""Unable to verify checksums.""" )
return
if len(set(UpperCamelCase__ ) - set(UpperCamelCase__ ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(UpperCamelCase__ ) - set(UpperCamelCase__ ) ) )
if len(set(UpperCamelCase__ ) - set(UpperCamelCase__ ) ) > 0:
raise UnexpectedDownloadedFile(str(set(UpperCamelCase__ ) - set(UpperCamelCase__ ) ) )
_a : int = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
_a : List[str] = """ for """ + verification_name if verification_name is not None else """"""
if len(UpperCamelCase__ ) > 0:
raise NonMatchingChecksumError(
F"""Checksums didn't match{for_verification_name}:\n"""
F"""{bad_urls}\n"""
"""Set `verification_mode='no_checks'` to skip checksums verification and ignore this error""" )
logger.info("""All the checksums matched successfully""" + for_verification_name )
class UpperCamelCase ( snake_case_ ):
pass
class UpperCamelCase ( snake_case_ ):
pass
class UpperCamelCase ( snake_case_ ):
pass
class UpperCamelCase ( snake_case_ ):
pass
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if expected_splits is None:
logger.info("""Unable to verify splits sizes.""" )
return
if len(set(UpperCamelCase__ ) - set(UpperCamelCase__ ) ) > 0:
raise ExpectedMoreSplits(str(set(UpperCamelCase__ ) - set(UpperCamelCase__ ) ) )
if len(set(UpperCamelCase__ ) - set(UpperCamelCase__ ) ) > 0:
raise UnexpectedSplits(str(set(UpperCamelCase__ ) - set(UpperCamelCase__ ) ) )
_a : List[Any] = [
{"""expected""": expected_splits[name], """recorded""": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(UpperCamelCase__ ) > 0:
raise NonMatchingSplitsSizesError(str(UpperCamelCase__ ) )
logger.info("""All the splits matched successfully.""" )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ = True ):
'''simple docstring'''
if record_checksum:
_a : int = shaaaa()
with open(UpperCamelCase__ , """rb""" ) as f:
for chunk in iter(lambda: f.read(1 << 2_0 ) , B"""""" ):
m.update(UpperCamelCase__ )
_a : List[Any] = m.hexdigest()
else:
_a : Any = None
return {"num_bytes": os.path.getsize(UpperCamelCase__ ), "checksum": checksum}
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
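# Editorial, self-contained sketch (not part of the original module) of what the
# size/checksum helper above records; `hashlib.sha256` is used directly here because the
# `shaaaa` import at the top of this file is a mangled alias for it.
import hashlib
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"hello")
    tmp_path = tmp.name
digest = hashlib.sha256()
with open(tmp_path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)
print({"num_bytes": os.path.getsize(tmp_path), "checksum": digest.hexdigest()})
os.remove(tmp_path)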
| 389
| 1
|
"""simple docstring"""
def UpperCAmelCase ( A__: Dict ) -> List[Any]:
__lowerCamelCase : Tuple = 0
__lowerCamelCase : Union[str, Any] = len(A__ )
for i in range(n - 1 ):
for j in range(i + 1 , A__ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def UpperCAmelCase ( A__: Union[str, Any] ) -> Union[str, Any]:
if len(A__ ) <= 1:
return arr, 0
__lowerCamelCase : Optional[Any] = len(A__ ) // 2
__lowerCamelCase : List[Any] = arr[0:mid]
__lowerCamelCase : Tuple = arr[mid:]
__lowerCamelCase : str = count_inversions_recursive(A__ )
__lowerCamelCase : str = count_inversions_recursive(A__ )
__lowerCamelCase : Any = _count_cross_inversions(A__ , A__ )
__lowerCamelCase : List[str] = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def UpperCAmelCase ( A__: Optional[int] , A__: str ) -> Optional[int]:
__lowerCamelCase : Optional[int] = []
__lowerCamelCase : Union[str, Any] = 0
while i < len(A__ ) and j < len(A__ ):
if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i <= k < len(p).
            # These are all inversions. The claim follows from the
            # property that p is sorted.
num_inversion += len(A__ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(A__ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def UpperCAmelCase ( ) -> List[str]:
__lowerCamelCase : str = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
__lowerCamelCase : Tuple = count_inversions_bf(A__ )
__lowerCamelCase : Union[str, Any] = count_inversions_recursive(A__ )
assert num_inversions_bf == num_inversions_recursive == 8
print('number of inversions = ' , A__ )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
__lowerCamelCase : str = count_inversions_bf(A__ )
__lowerCamelCase : str = count_inversions_recursive(A__ )
assert num_inversions_bf == num_inversions_recursive == 0
print('number of inversions = ' , A__ )
# an empty list should also have zero inversions
__lowerCamelCase : Any = []
__lowerCamelCase : List[str] = count_inversions_bf(A__ )
__lowerCamelCase : Tuple = count_inversions_recursive(A__ )
assert num_inversions_bf == num_inversions_recursive == 0
print('number of inversions = ' , A__ )
if __name__ == "__main__":
main()
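# Editorial cross-check (not part of the original file): a one-liner the brute-force and
# divide-and-conquer counters above should both agree with.
def _inversions(arr: list) -> int:
    return sum(
        1 for i in range(len(arr)) for j in range(i + 1, len(arr)) if arr[i] > arr[j]
    )

assert _inversions([10, 2, 1, 5, 5, 2, 11]) == 8  # the example array used in main()
assert _inversions(sorted([10, 2, 1, 5, 5, 2, 11])) == 0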
| 705
|
"""simple docstring"""
from cva import destroyAllWindows, imread, imshow, waitKey
def UpperCAmelCase ( A__: Tuple ) -> Union[str, Any]:
    # get the image height and width
__lowerCamelCase , __lowerCamelCase : Optional[Any] = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(A__ ):
for j in range(A__ ):
__lowerCamelCase : Optional[Any] = [255, 255, 255] - img[i][j]
return img
if __name__ == "__main__":
# read original image
a_ : Dict = imread('''image_data/lena.jpg''', 1)
# convert to its negative
a_ : str = convert_to_negative(img)
# show result image
imshow('''negative of original image''', img)
waitKey(0)
destroyAllWindows()
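# Editorial alternative (not part of the original file): cv2 images are numpy arrays, so
# the per-pixel loop above reduces to a single vectorized expression.
import numpy as np

_black = np.zeros((2, 2, 3), dtype=np.uint8)
assert (255 - _black == 255).all()  # the negative of black is white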
| 263
| 0
|
'''simple docstring'''
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
_snake_case : Any = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class A ( _a ):
def __init__( self : Any , lowerCAmelCase_ : int = 1_01 ) -> Dict:
"""simple docstring"""
_a = length
def __len__( self : Optional[Any] ) -> Any:
"""simple docstring"""
return self.length
def __getitem__( self : Tuple , lowerCAmelCase_ : List[Any] ) -> int:
"""simple docstring"""
return i
class A :
def __call__( self : List[str] , lowerCAmelCase_ : Any ) -> str:
"""simple docstring"""
return {"input_ids": torch.tensor(lowerCAmelCase_ ), "labels": torch.tensor(lowerCAmelCase_ )}
class A ( nn.Module ):
def __init__( self : Any ) -> Tuple:
"""simple docstring"""
super().__init__()
# Add some (unused) params otherwise DDP will complain.
_a = nn.Linear(1_20 , 80 )
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str]=None ) -> int:
"""simple docstring"""
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class A ( _a ):
@require_torch_neuroncore
def __lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
_a = F'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
_a = self.get_auto_remove_tmp_dir()
_a = F'--output_dir {output_dir}'.split()
_a = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCAmelCase_ , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class A ( _a ):
@require_torch_multi_gpu
def __lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
_a = F'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
_a = self.get_auto_remove_tmp_dir()
_a = F'--output_dir {output_dir}'.split()
_a = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCAmelCase_ , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
_snake_case : List[str] = HfArgumentParser((TrainingArguments,))
_snake_case : Optional[Any] = parser.parse_args_into_dataclasses()[0]
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
F'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
_snake_case : Any = DummyDataset(dataset_length)
def snake_case_ (UpperCamelCase : EvalPrediction ):
'''simple docstring'''
_a = list(range(len(UpperCamelCase ) ) )
_a = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'''Predictions and/or labels do not match expected results:\n - predictions: '''
f'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' )
return {"success": success}
_snake_case : Optional[Any] = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
_snake_case : str = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
_snake_case : int = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
_snake_case : Any = 2
_snake_case : Optional[Any] = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
_snake_case : Optional[int] = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
_snake_case : Optional[Any] = None
| 22
|
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class UpperCAmelCase_ :
def __init__( self : Union[str, Any] , UpperCAmelCase__ : list[tuple[float, float]] ) -> Optional[int]:
lowerCAmelCase = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
lowerCAmelCase = len(UpperCAmelCase__ ) - 1
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : float ) -> list[float]:
assert 0 <= t <= 1, "Time t must be between 0 and 1."
lowerCAmelCase = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , UpperCAmelCase__ ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(UpperCAmelCase__ ) , 5 ) == 1
return output_values
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : float ) -> tuple[float, float]:
assert 0 <= t <= 1, "Time t must be between 0 and 1."
lowerCAmelCase = self.basis_function(UpperCAmelCase__ )
lowerCAmelCase = 0.0
lowerCAmelCase = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : float = 0.01 ) -> Union[str, Any]:
from matplotlib import pyplot as plt # type: ignore
lowerCAmelCase = [] # x coordinates of points to plot
lowerCAmelCase = [] # y coordinates of points to plot
lowerCAmelCase = 0.0
while t <= 1:
lowerCAmelCase = self.bezier_curve_function(UpperCAmelCase__ )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
lowerCAmelCase = [i[0] for i in self.list_of_points]
lowerCAmelCase = [i[1] for i in self.list_of_points]
plt.plot(
UpperCAmelCase__ , UpperCAmelCase__ , color='blue' , label='Curve of Degree ' + str(self.degree ) , )
plt.scatter(UpperCAmelCase__ , UpperCAmelCase__ , color='red' , label='Control Points' )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
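# Editorial numeric check (not part of the original file): for the degree-1 curve
# through (1, 2) and (3, 5) used above, the Bernstein form reduces to linear
# interpolation, so the point at t = 0.5 is the midpoint (2.0, 3.5).
def _bezier_point(points: list, t: float) -> tuple:
    n = len(points) - 1
    basis = [comb(n, i) * (1 - t) ** (n - i) * t**i for i in range(n + 1)]
    return (
        sum(b * p[0] for b, p in zip(basis, points)),
        sum(b * p[1] for b, p in zip(basis, points)),
    )

assert _bezier_point([(1, 2), (3, 5)], 0.5) == (2.0, 3.5)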
| 133
| 0
|
"""simple docstring"""
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
"""simple docstring"""
if name is None:
UpperCamelCase = None
else:
UpperCamelCase = "." * max(0 , spaces - 2 ) + "# {:" + str(50 - spaces ) + "s}"
UpperCamelCase = fmt.format(_SCREAMING_SNAKE_CASE )
# Print and recurse (if needed).
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if msg is not None:
print(_SCREAMING_SNAKE_CASE )
for k in val.keys():
recursive_print(_SCREAMING_SNAKE_CASE , val[k] , spaces + 2 )
elif isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
print(_SCREAMING_SNAKE_CASE , ":" , val.size() )
else:
print(_SCREAMING_SNAKE_CASE , ":" , _SCREAMING_SNAKE_CASE )
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
UpperCamelCase = (num_heads, hidden_size, num_splits) + input_shape[1:]
UpperCamelCase = param.view(*_SCREAMING_SNAKE_CASE )
UpperCamelCase = param.transpose(0 , 2 )
UpperCamelCase = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
UpperCamelCase = (num_heads, num_splits, hidden_size) + input_shape[1:]
UpperCamelCase = param.view(*_SCREAMING_SNAKE_CASE )
UpperCamelCase = param.transpose(0 , 1 ).contiguous()
UpperCamelCase = param.view(*_SCREAMING_SNAKE_CASE )
return param
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = {}
# old versions did not store training args
UpperCamelCase = input_state_dict.get("args" , _SCREAMING_SNAKE_CASE )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
UpperCamelCase = ds_args.padded_vocab_size
UpperCamelCase = ds_args.max_position_embeddings
UpperCamelCase = ds_args.hidden_size
UpperCamelCase = ds_args.num_layers
UpperCamelCase = ds_args.num_attention_heads
UpperCamelCase = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
UpperCamelCase = config.n_head
# The hidden_size per head.
UpperCamelCase = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
UpperCamelCase = input_state_dict["checkpoint_version"]
else:
UpperCamelCase = 0.0
# The model.
UpperCamelCase = input_state_dict["model"]
# The language model.
UpperCamelCase = model["language_model"]
# The embeddings.
UpperCamelCase = lm["embedding"]
# The word embeddings.
UpperCamelCase = embeddings["word_embeddings"]["weight"]
# Truncate the embedding table to vocab_size rows.
UpperCamelCase = word_embeddings[: config.vocab_size, :]
UpperCamelCase = word_embeddings
# The position embeddings.
UpperCamelCase = embeddings["position_embeddings"]["weight"]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
UpperCamelCase = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match" )
# Store the position embeddings.
UpperCamelCase = pos_embeddings
# The transformer.
UpperCamelCase = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
# The regex to extract layer names.
UpperCamelCase = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)" )
# The simple map of names for "automated" rules.
UpperCamelCase = {
"attention.dense": ".attn.c_proj.",
"self_attention.dense": ".attn.c_proj.",
"mlp.dense_h_to_4h": ".mlp.c_fc.",
"mlp.dense_4h_to_h": ".mlp.c_proj.",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
UpperCamelCase = layer_re.match(_SCREAMING_SNAKE_CASE )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
UpperCamelCase = int(m.group(1 ) )
# The name of the operation.
UpperCamelCase = m.group(2 )
# Is it a weight or a bias?
UpperCamelCase = m.group(3 )
# The name of the layer.
UpperCamelCase = F"transformer.h.{layer_idx}"
# For layernorm(s), simply store the layer norm.
if op_name.endswith("layernorm" ):
UpperCamelCase = "ln_1" if op_name.startswith("input" ) else "ln_2"
UpperCamelCase = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
            # Insert the 1x1xDxD causal mask (GPT-2 stores it as the attention "bias" buffer).
UpperCamelCase = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = causal_mask
# Insert a "dummy" tensor for masked_bias.
UpperCamelCase = torch.tensor(-1e4 , dtype=torch.floataa )
UpperCamelCase = masked_bias
UpperCamelCase = fix_query_key_value_ordering(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 3 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
UpperCamelCase = out_val.transpose(0 , 1 ).contiguous()
# Store.
UpperCamelCase = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
UpperCamelCase = fix_query_key_value_ordering(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 3 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Store. No change of shape.
UpperCamelCase = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
UpperCamelCase = megatron_to_transformers[op_name]
UpperCamelCase = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
UpperCamelCase = megatron_to_transformers[op_name]
UpperCamelCase = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
UpperCamelCase = transformer["final_layernorm.weight"]
UpperCamelCase = transformer["final_layernorm.bias"]
    # For the LM head, transformers wants the matrix tied to the word embeddings.
UpperCamelCase = word_embeddings
# It should be done!
return output_state_dict
def a__ ( ):
"""simple docstring"""
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--print-checkpoint-structure" , action="store_true" )
parser.add_argument(
"path_to_checkpoint" , type=_SCREAMING_SNAKE_CASE , help="Path to the checkpoint file (.zip archive or direct .pt file)" , )
parser.add_argument(
"--config_file" , default="" , type=_SCREAMING_SNAKE_CASE , help="An optional config json file describing the pre-trained model." , )
UpperCamelCase = parser.parse_args()
# Extract the basename.
UpperCamelCase = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F"Extracting PyTorch state dictionary from {args.path_to_checkpoint}" )
if args.path_to_checkpoint.endswith(".zip" ):
with zipfile.ZipFile(args.path_to_checkpoint , "r" ) as checkpoint:
with checkpoint.open("release/mp_rank_00/model_optim_rng.pt" ) as pytorch_dict:
UpperCamelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location="cpu" )
else:
UpperCamelCase = torch.load(args.path_to_checkpoint , map_location="cpu" )
UpperCamelCase = input_state_dict.get("args" , _SCREAMING_SNAKE_CASE )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
UpperCamelCase = "gelu_fast"
elif ds_args.openai_gelu:
UpperCamelCase = "gelu_new"
else:
UpperCamelCase = "gelu"
else:
# in the very early days this used to be "gelu_new"
UpperCamelCase = "gelu_new"
# Spell out all parameters in case the defaults change.
UpperCamelCase = GPTaConfig(
vocab_size=50_257 , n_positions=1_024 , n_embd=1_024 , n_layer=24 , n_head=16 , n_inner=4_096 , activation_function=_SCREAMING_SNAKE_CASE , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=_SCREAMING_SNAKE_CASE , summary_activation=_SCREAMING_SNAKE_CASE , summary_proj_to_labels=_SCREAMING_SNAKE_CASE , summary_first_dropout=0.1 , scale_attn_weights=_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE , bos_token_id=50_256 , eos_token_id=50_256 , )
else:
UpperCamelCase = GPTaConfig.from_json_file(args.config_file )
UpperCamelCase = ["GPT2LMHeadModel"]
# Convert.
print("Converting" )
UpperCamelCase = convert_megatron_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
UpperCamelCase = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
UpperCamelCase = "gpt2"
elif tokenizer_type == "PretrainedFromHF":
UpperCamelCase = ds_args.tokenizer_name_or_path
else:
raise ValueError(F"Unrecognized tokenizer_type {tokenizer_type}" )
else:
UpperCamelCase = "gpt2"
UpperCamelCase = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase = type(_SCREAMING_SNAKE_CASE ).__name__
UpperCamelCase = tokenizer_class
# Store the config to file.
print("Saving config" )
config.save_pretrained(_SCREAMING_SNAKE_CASE )
# Save tokenizer based on args
print(F"Adding {tokenizer_class} tokenizer files" )
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
# Store the state_dict to file.
UpperCamelCase = os.path.join(_SCREAMING_SNAKE_CASE , "pytorch_model.bin" )
print(F"Saving checkpoint to \"{output_checkpoint_file}\"" )
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
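# Editorial shape check (not part of the original script) for the checkpoint_version 1.0
# branch of the QKV reordering helper above: a [heads * hidden * splits, cols] weight is
# permuted so that the Q/K/V splits come first.
_heads, _hidden, _splits, _cols = 2, 4, 3, 5
_param = torch.arange(_heads * _hidden * _splits * _cols).view(_heads * _hidden * _splits, _cols)
_out = _param.view(_heads, _hidden, _splits, _cols).transpose(0, 2).transpose(1, 2).contiguous()
assert _out.shape == (_splits, _heads, _hidden, _cols)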
| 717
|
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class _lowerCamelCase ( unittest.TestCase ):
UpperCAmelCase_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def snake_case_ (self , __a , __a , __a ) -> str:
UpperCamelCase = hf_hub_download(
repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
UpperCamelCase = VideoClassificationPipeline(model=__a , image_processor=__a , top_k=2 )
UpperCamelCase = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def snake_case_ (self , __a , __a ) -> str:
for example in examples:
UpperCamelCase = video_classifier(__a )
self.assertEqual(
__a , [
{"score": ANY(__a ), "label": ANY(__a )},
{"score": ANY(__a ), "label": ANY(__a )},
] , )
@require_torch
def snake_case_ (self ) -> int:
UpperCamelCase = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
UpperCamelCase = VideoMAEFeatureExtractor(
size={"shortest_edge": 10} , crop_size={"height": 10, "width": 10} )
UpperCamelCase = pipeline(
"video-classification" , model=__a , feature_extractor=__a , frame_sampling_rate=4 )
UpperCamelCase = hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
UpperCamelCase = video_classifier(__a , top_k=2 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}] , )
UpperCamelCase = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
[{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
] , )
@require_tf
def snake_case_ (self ) -> Optional[Any]:
pass
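# Editorial usage sketch (not part of the original test): outside of tests, the pipeline
# is usually built from a hub model id; requires torch, decord and network access, so it
# is left commented out here.
# from transformers import pipeline
# clf = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
# print(clf("archery.mp4", top_k=2))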
| 544
| 0
|
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] ={}
SCREAMING_SNAKE_CASE_: int =job["""started_at"""]
SCREAMING_SNAKE_CASE_: List[Any] =job["""completed_at"""]
SCREAMING_SNAKE_CASE_: Optional[Any] =date_parser.parse(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =date_parser.parse(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =round((end_datetime - start_datetime).total_seconds() / 60.0 )
SCREAMING_SNAKE_CASE_: Optional[Any] =start
SCREAMING_SNAKE_CASE_: int =end
SCREAMING_SNAKE_CASE_: Optional[int] =duration_in_min
return job_info
def __magic_name__ ( lowercase , lowercase=None ):
SCREAMING_SNAKE_CASE_: str =None
if token is not None:
SCREAMING_SNAKE_CASE_: int ={"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
SCREAMING_SNAKE_CASE_: List[str] =f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
SCREAMING_SNAKE_CASE_: Dict =requests.get(lowercase , headers=lowercase ).json()
SCREAMING_SNAKE_CASE_: Any ={}
try:
job_time.update({job["""name"""]: extract_time_from_single_job(lowercase ) for job in result["""jobs"""]} )
SCREAMING_SNAKE_CASE_: Dict =math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(lowercase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =requests.get(url + f'''&page={i + 2}''' , headers=lowercase ).json()
job_time.update({job["""name"""]: extract_time_from_single_job(lowercase ) for job in result["""jobs"""]} )
return job_time
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = get_job_time(args.workflow_run_id)
_UpperCAmelCase = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"""{k}: {v['duration']}""")
| 409
|
"""simple docstring"""
import argparse
import struct
import unittest
class a :
def __init__( self : List[str] , lowerCAmelCase : bytes ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =data
# Initialize hash values
SCREAMING_SNAKE_CASE_: int =[
0X6A_09E_667,
0XBB_67A_E85,
0X3C_6EF_372,
0XA5_4FF_53A,
0X51_0E5_27F,
0X9B_056_88C,
0X1F_83D_9AB,
0X5B_E0C_D19,
]
# Initialize round constants
SCREAMING_SNAKE_CASE_: List[Any] =[
0X42_8A2_F98,
0X71_374_491,
0XB5_C0F_BCF,
0XE9_B5D_BA5,
0X39_56C_25B,
0X59_F11_1F1,
0X92_3F8_2A4,
0XAB_1C5_ED5,
0XD8_07A_A98,
0X12_835_B01,
0X24_318_5BE,
0X55_0C7_DC3,
0X72_BE5_D74,
0X80_DEB_1FE,
0X9B_DC0_6A7,
0XC1_9BF_174,
0XE4_9B6_9C1,
0XEF_BE4_786,
0X0F_C19_DC6,
0X24_0CA_1CC,
0X2D_E92_C6F,
0X4A_748_4AA,
0X5C_B0A_9DC,
0X76_F98_8DA,
0X98_3E5_152,
0XA8_31C_66D,
0XB0_032_7C8,
0XBF_597_FC7,
0XC6_E00_BF3,
0XD5_A79_147,
0X06_CA6_351,
0X14_292_967,
0X27_B70_A85,
0X2E_1B2_138,
0X4D_2C6_DFC,
0X53_380_D13,
0X65_0A7_354,
0X76_6A0_ABB,
0X81_C2C_92E,
0X92_722_C85,
0XA2_BFE_8A1,
0XA8_1A6_64B,
0XC2_4B8_B70,
0XC7_6C5_1A3,
0XD1_92E_819,
0XD6_990_624,
0XF4_0E3_585,
0X10_6AA_070,
0X19_A4C_116,
0X1E_376_C08,
0X27_487_74C,
0X34_B0B_CB5,
0X39_1C0_CB3,
0X4E_D8A_A4A,
0X5B_9CC_A4F,
0X68_2E6_FF3,
0X74_8F8_2EE,
0X78_A56_36F,
0X84_C87_814,
0X8C_C70_208,
0X90_BEF_FFA,
0XA4_506_CEB,
0XBE_F9A_3F7,
0XC6_717_8F2,
]
SCREAMING_SNAKE_CASE_: int =self.preprocessing(self.data )
self.final_hash()
@staticmethod
def lowerCamelCase__ ( lowerCAmelCase : bytes ) -> bytes:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =B"""\x80""" + (B"""\x00""" * (63 - (len(lowerCAmelCase ) + 8) % 64))
SCREAMING_SNAKE_CASE_: List[Any] =struct.pack(""">Q""" , (len(lowerCAmelCase ) * 8) )
return data + padding + big_endian_integer
def lowerCamelCase__ ( self : Optional[Any] ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =[
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
SCREAMING_SNAKE_CASE_: Optional[int] =list(struct.unpack(""">16L""" , lowerCAmelCase ) )
# add 48 0-ed integers
words += [0] * 48
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
SCREAMING_SNAKE_CASE_: Any =(
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
SCREAMING_SNAKE_CASE_: Tuple =(
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
SCREAMING_SNAKE_CASE_: List[str] =(
words[index - 16] + sa + words[index - 7] + sa
) % 0X100_000_000
# Compression
SCREAMING_SNAKE_CASE_: Dict =self.ror(lowerCAmelCase , 6 ) ^ self.ror(lowerCAmelCase , 11 ) ^ self.ror(lowerCAmelCase , 25 )
SCREAMING_SNAKE_CASE_: Any =(e & f) ^ ((~e & 0XFF_FFF_FFF) & g)
SCREAMING_SNAKE_CASE_: Union[str, Any] =(
h + sa + ch + self.round_constants[index] + words[index]
) % 0X100_000_000
SCREAMING_SNAKE_CASE_: List[Any] =self.ror(lowerCAmelCase , 2 ) ^ self.ror(lowerCAmelCase , 13 ) ^ self.ror(lowerCAmelCase , 22 )
SCREAMING_SNAKE_CASE_: int =(a & b) ^ (a & c) ^ (b & c)
SCREAMING_SNAKE_CASE_: Optional[int] =(sa + maj) % 0X100_000_000
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =(
g,
f,
e,
((d + tempa) % 0X100_000_000),
c,
b,
a,
((tempa + tempa) % 0X100_000_000),
)
SCREAMING_SNAKE_CASE_: Dict =[a, b, c, d, e, f, g, h]
# Modify final values
SCREAMING_SNAKE_CASE_: List[Any] =[
((element + mutated_hash_values[index]) % 0X100_000_000)
for index, element in enumerate(self.hashes )
]
SCREAMING_SNAKE_CASE_: Tuple ="""""".join([hex(lowerCAmelCase )[2:].zfill(8 ) for value in self.hashes] )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : int ) -> int:
'''simple docstring'''
return 0XFF_FFF_FFF & (value << (32 - rotations)) | (value >> rotations)
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : List[str] ) -> None:
'''simple docstring'''
import hashlib
SCREAMING_SNAKE_CASE_: Union[str, Any] =bytes("""Test String""" , """utf-8""" )
self.assertEqual(SHAaaa(lowerCAmelCase ).hash , hashlib.shaaaa(lowerCAmelCase ).hexdigest() )
def __magic_name__ ( ):
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE_: str =argparse.ArgumentParser()
parser.add_argument(
"""-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument(
"""-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
SCREAMING_SNAKE_CASE_: Any =parser.parse_args()
SCREAMING_SNAKE_CASE_: List[str] =args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""" ) as f:
SCREAMING_SNAKE_CASE_: Optional[Any] =f.read()
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =bytes(lowercase , """utf-8""" )
print(SHAaaa(lowercase ).hash )
if __name__ == "__main__":
main()
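# Editorial check (not part of the original file) of the 32-bit rotate-right primitive
# used above, plus the hashlib reference digest that the pure-Python class must reproduce.
import hashlib

def _ror32(value: int, rotations: int) -> int:
    return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)

assert _ror32(0x1, 1) == 0x80000000
print(hashlib.sha256(b"Test String").hexdigest())  # reference for the unit test above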
| 409
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase_: Tuple = logging.get_logger(__name__)
def __a ( A , A , A ):
'''simple docstring'''
lowercase__ = UniSpeechSatForSequenceClassification.from_pretrained(UpperCamelCase__ , config=UpperCamelCase__ )
lowercase__ = downstream_dict["projector.weight"]
lowercase__ = downstream_dict["projector.bias"]
lowercase__ = downstream_dict["model.post_net.linear.weight"]
lowercase__ = downstream_dict["model.post_net.linear.bias"]
return model
def __a ( A , A , A ):
'''simple docstring'''
lowercase__ = UniSpeechSatForAudioFrameClassification.from_pretrained(UpperCamelCase__ , config=UpperCamelCase__ )
lowercase__ = downstream_dict["model.linear.weight"]
lowercase__ = downstream_dict["model.linear.bias"]
return model
def __a ( A , A , A ):
'''simple docstring'''
lowercase__ = UniSpeechSatForXVector.from_pretrained(UpperCamelCase__ , config=UpperCamelCase__ )
lowercase__ = downstream_dict["connector.weight"]
lowercase__ = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
lowercase__ = downstream_dict[
f'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
lowercase__ = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
lowercase__ = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
lowercase__ = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
lowercase__ = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
lowercase__ = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
lowercase__ = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __a ( A , A , A , A ):
'''simple docstring'''
lowercase__ = torch.load(UpperCamelCase__ , map_location="cpu" )
lowercase__ = checkpoint["Downstream"]
lowercase__ = UniSpeechSatConfig.from_pretrained(UpperCamelCase__ )
lowercase__ = WavaVecaFeatureExtractor.from_pretrained(
UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , do_normalize=UpperCamelCase__ )
lowercase__ = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
lowercase__ = convert_classification(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
elif arch.endswith("ForAudioFrameClassification" ):
lowercase__ = convert_diarization(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
elif arch.endswith("ForXVector" ):
lowercase__ = convert_xvector(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
lowercase__ = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(UpperCamelCase__ )
hf_model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
lowerCAmelCase_: List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
lowerCAmelCase_: Optional[Any] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 720
|
"""simple docstring"""
from collections import deque
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = process_name # process name
lowercase__ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
lowercase__ = arrival_time
lowercase__ = burst_time # remaining burst time
lowercase__ = 0 # total time of the process wait in ready queue
lowercase__ = 0 # time from arrival time to completion time
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = number_of_queues
# time slice of queues that round robin algorithm applied
lowercase__ = time_slices
# unfinished process is in this ready_queue
lowercase__ = queue
# current time
lowercase__ = current_time
# finished process is in this sequence queue
lowercase__ = deque()
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
return [q.burst_time for q in queue]
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except the last one use the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
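
# Added illustrative helper (not part of the original module): runs a tiny MLFQ
# instance and reduces the per-process waiting times to a single average.
# Process names, arrival times, and burst times below are arbitrary examples.
def average_waiting_time_demo() -> float:
    demo_queue = deque([Process("A", 0, 10), Process("B", 2, 5)])
    scheduler = MLFQ(number_of_queues=2, time_slices=[4], queue=demo_queue, current_time=0)
    scheduler.multi_level_feedback_queue()
    finished = list(scheduler.finish_queue)
    return sum(p.waiting_time for p in finished) / len(finished)
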
if __name__ == "__main__":
import doctest
lowerCAmelCase_: Optional[int] = Process("P1", 0, 5_3)
lowerCAmelCase_: Union[str, Any] = Process("P2", 0, 1_7)
lowerCAmelCase_: str = Process("P3", 0, 6_8)
lowerCAmelCase_: int = Process("P4", 0, 2_4)
lowerCAmelCase_: Dict = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Tuple = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
lowerCAmelCase_: Any = Process("P1", 0, 5_3)
lowerCAmelCase_: Tuple = Process("P2", 0, 1_7)
lowerCAmelCase_: Optional[int] = Process("P3", 0, 6_8)
lowerCAmelCase_: List[Any] = Process("P4", 0, 2_4)
lowerCAmelCase_: Union[str, Any] = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Optional[Any] = deque([Pa, Pa, Pa, Pa])
lowerCAmelCase_: Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0)
lowerCAmelCase_: Tuple = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
F'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
# ---------------------------------------------------------------------------
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
# ---------------------------------------------------------------------------
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # walk the dotted attribute path ("a.b.c") to reach the target parameter
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
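
# Illustration (hypothetical file contents): a fairseq `dict.txt` holds one
# "<token> <count>" pair per line, e.g.
#
#     | 94802
#     E 51860
#     T 38431
#
# create_vocab_dict() keeps only the token column and assigns ids 4..N+3,
# reserving ids 0-3 for the <s>, <pad>, </s> and <unk> specials defined above.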
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """Copy/paste/tweak the fairseq model's weights into the transformers design."""
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=10224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
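
# Example invocation of this script (all paths are placeholders):
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict.txt \
#       --pytorch_dump_folder_path ./converted-model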
# ---------------------------------------------------------------------------
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
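
# Minimal usage sketch (illustrative values only):
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#   config.model_type        -> "mobilenet_v1"
#   config.depth_multiplier  -> 0.75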
# ---------------------------------------------------------------------------
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.

    Returns: ConvertCommand
    """
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers CLI.

        Args:
            parser: Root parser to register command-specific arguments.
        """
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
A : List[Any] = self._tf_checkpoint
A : List[Any] = ''''''
else:
A : List[str] = self._tf_checkpoint
A : Optional[Any] = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
SCREAMING_SNAKE_CASE , self._config , self._pytorch_dump_output , SCREAMING_SNAKE_CASE )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
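
# Example invocation through the transformers CLI (checkpoint paths are placeholders):
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin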
# ---------------------------------------------------------------------------
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
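
    # Worked example (added for illustration): a strictly diagonally dominant
    # 3x3 system whose exact solution is [1, 2, -1]; 50 Jacobi iterations
    # should land very close to it.
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[5.0], [9.0], [1.0]])
    print(jacobi_iteration_method(coefficient, constant, [0, 0, 0], 50))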
# ---------------------------------------------------------------------------
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        typed_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(typed_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
# ---------------------------------------------------------------------------
from __future__ import annotations
from typing import Any
class Graph:
    """Graph on which Borůvka's minimum spanning tree algorithm is run."""

    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge in the format [first, second, edge weight]."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Returns the component a node belongs to by following the parent links."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Re-labels every node with its final component representative."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)
    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merges the components of two nodes, attaching the smaller to the larger."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(v_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(u_node)
    def boruvka(self) -> None:
        """Performs Borůvka's algorithm to find the minimum spanning tree."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
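
    # Small illustrative run (added example, arbitrary edge weights): build a
    # 5-node graph and print the edges Borůvka's algorithm picks for the MST.
    g = Graph(5)
    g.add_edge(0, 1, 10)
    g.add_edge(0, 2, 6)
    g.add_edge(0, 3, 5)
    g.add_edge(1, 3, 15)
    g.add_edge(2, 3, 4)
    g.add_edge(3, 4, 8)
    g.boruvka()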
# ---------------------------------------------------------------------------
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
_overwrite_items = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
_delete_items = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
_access_absent_items = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my_exc) == str(py_exc)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_implemented():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
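

def test_basic_usage_example():
    # Added illustrative test (not part of the original suite): HashMap should
    # mirror the built-in dict for a simple set/get/delete round-trip.
    hm = HashMap(initial_block_size=4)
    hm["key"] = "value"
    assert hm["key"] == "value"
    del hm["key"]
    assert len(hm) == 0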
# ---------------------------------------------------------------------------
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer
a = ["gpt2"]
a = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class GPT2TokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))
    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
# ---------------------------------------------------------------------------
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
MAPPING_TEXT_ENCODER_PRENET = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
MAPPING_SPEECH_DECODER_PRENET = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
MAPPING_TEXT_DECODER_PRENET = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
MAPPING_TEXT_DECODER_POSTNET = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
MAPPING_ENCODER = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
MAPPING_DECODER = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # walk the dotted attribute path ("a.b.c") to reach the target parameter
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None,
):
    """Copy/paste/tweak the original model's weights into the transformers design."""
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
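
# Example invocation of this script (checkpoint and vocab paths are placeholders):
#   python convert_speecht5_checkpoint.py --task t2s \
#       --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5-tts-converted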
# ---------------------------------------------------------------------------
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
__A : List[Any] = Accelerator()
__A : Dict = (accelerator.state.process_index + 2, 10)
__A : Tuple = torch.randint(0, 10, shape).to(accelerator.device)
__A : Optional[Any] = ''''''
__A : Optional[int] = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
__A : int = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
__A : Optional[int] = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
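
# To exercise the __main__ padding check above directly, launch this file under
# torchrun (illustrative command; requires at least 2 GPUs):
#   torchrun --nproc_per_node=2 this_test_file.py
# Each rank builds a tensor whose first dimension depends on its process index,
# and pad_across_processes must equalize the shapes by zero-padding.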
# ---------------------------------------------------------------------------
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
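# Hedged usage note (illustrative, not part of this __init__): with the _LazyModule
# installed above, heavy submodules load only on first attribute access, e.g.
#
#   from transformers import DeiTConfig   # cheap: resolved via _import_structure
#   from transformers import DeiTModel    # only now are modeling_deit (and torch) imported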
| 141
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
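    # Hedged usage sketch (checkpoint name is illustrative, not taken from this file):
    # outside the test suite the processor is typically driven as
    #
    #   processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
    #   pixel_values = processor(pil_image, return_tensors="pt").pixel_values
    #
    # which resizes, center-crops and normalizes exactly as asserted above.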
| 490
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 490
| 1
|
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


# Class name reconstructed: this file mirrors the GLPN image processor in transformers.
class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
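    # Worked example of the resize arithmetic above (hedged; numbers are illustrative):
    # with size_divisor=32, a 225x481 input maps to 224x480, since
    # 225 // 32 * 32 == 224 and 481 // 32 * 32 == 480. A minimal check:
    #
    #   proc = GLPNImageProcessor(size_divisor=32)
    #   out = proc(np.zeros((225, 481, 3), dtype=np.uint8), return_tensors="np")
    #   assert out["pixel_values"].shape[-2:] == (224, 480)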
| 177
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (config, input_ids, input_mask, head_mask) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    # flag names reconstructed; the mangled source only shows three booleans set to False
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9_865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
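    # Hedged note on the test above: decoder-only models must be left-padded for batched
    # generation, otherwise pad tokens would sit between the prompt and the generated
    # continuation. A minimal illustration (model/tokenizer as loaded above):
    #
    #   tokenizer.padding_side = "left"
    #   batch = tokenizer(sentences, return_tensors="tf", padding=True)
    #   model.generate(batch.input_ids, attention_mask=batch.attention_mask)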
| 177
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
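    # Hedged illustration (not part of the config): for the default task the property
    # above evaluates to an OrderedDict equivalent to
    #
    #   {"input_ids": {0: "batch", 1: "sequence"},
    #    "attention_mask": {0: "batch", 1: "sequence"}}
    #
    # which the ONNX exporter uses as the dynamic axes of the model inputs.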
| 469
|
def is_contains_unique_chars(input_str: str) -> bool:
    """
    Check whether all characters in a string are unique, using a big-int bitmap.

    >>> is_contains_unique_chars("I_love.py")
    True
    >>> is_contains_unique_chars("I don't love Python")
    False
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
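# Worked example of the bitmap trick above (hedged, illustrative): for "abc",
# ord("a") == 97 turns on bit 97 (i.e. 2**97), ord("b") == 98 turns on bit 98, and so
# on; for "aba" the second "a" finds bit 97 already set, so the function returns False.
#
#   assert is_contains_unique_chars("abc") is True
#   assert is_contains_unique_chars("aba") is False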
| 469
| 1
|
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a list of tokens; short snippets are skipped."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85) -> None:
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters in the dataset via MinHash LSH."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the Jaccard similarity of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Deduplicate the dataset, returning the filtered dataset and the duplicate clusters."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
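# Hedged end-to-end sketch (dataset content below is illustrative): the public entry
# point above is deduplicate_dataset. Note that snippets shorter than MIN_NUM_TOKENS
# tokens get no MinHash and are therefore never clustered.
#
#   from datasets import Dataset
#
#   ds = Dataset.from_dict(
#       {
#           "content": [long_code_a, near_copy_of_a, unrelated_code],  # placeholders
#           "repo_name": ["r1", "r2", "r3"],
#           "path": ["a.py", "b.py", "c.py"],
#       }
#   )
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)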
| 562
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.0_2,
        initializer_factor=0.0_2,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.0_2,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6_5_9_2, **kwargs
    ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
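    # Hedged usage sketch: composing the full config from its two halves via the
    # classmethod above.
    #
    #   text_config = AltCLIPTextConfig()
    #   vision_config = AltCLIPVisionConfig()
    #   config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
    #   assert config.projection_dim == 768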
| 562
| 1
|
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 2_56,
        mask_feature_size: int = 2_56,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=3_84,
                in_channels=3,
                patch_size=4,
                embed_dim=1_28,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
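    # Hedged usage sketch: with no arguments, __init__ above falls back to the
    # Swin-Base backbone and a default DetrConfig decoder.
    #
    #   config = MaskFormerConfig()
    #   assert config.backbone_config.model_type == "swin"
    #   assert config.decoder_config.model_type == "detr"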
| 45
|
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self):
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self):
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join([str(item) for item in self])

    def __getitem__(self, index):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index, data):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data):
        self.insert_nth(len(self), data)

    def insert_head(self, data):
        self.insert_nth(0, data)

    def insert_nth(self, index, data):
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self):  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self):
        return self.head is None

    def reverse(self):
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
| 110
| 0
|
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a single input sample x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-2_0, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([2_0, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the gain of a filter's impulse response on a log-frequency axis."""
    size = 5_1_2
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 2_0 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(2_4, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-8_0, bounds[0]]), min([8_0, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the phase shift of a filter's impulse response on a log-frequency axis."""
    size = 5_1_2
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(2_4, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
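# Hedged example of the FilterType protocol above: a one-pole low-pass filter is enough
# to drive both plots; the coefficient value is illustrative, not from the original file.


class OnePoleLowPass:
    """y[n] = (1 - a) * x[n] + a * y[n - 1]"""

    def __init__(self, a: float = 0.9) -> None:
        self.a = a
        self.prev = 0.0

    def process(self, sample: float) -> float:
        # first-order IIR update; self.prev holds y[n - 1]
        self.prev = (1 - self.a) * sample + self.a * self.prev
        return self.prev


# show_frequency_response(OnePoleLowPass(), samplerate=48000)
# show_phase_response(OnePoleLowPass(), samplerate=48000)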
| 287
|
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
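# Hedged CLI example (script name and paths are illustrative, not taken from this file):
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./reformer_weights.pkl \
#       --config_file ./reformer_config.json \
#       --pytorch_dump_path ./pytorch_model.bin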
| 287
| 1
|
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCAmelCase :
__A : Optional[str] = field(
default=__UpperCAmelCase , metadata={
'help': (
'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
)
} , )
__A : Optional[str] = field(
default=__UpperCAmelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(__UpperCAmelCase )} , )
__A : Optional[str] = field(
default=__UpperCAmelCase , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
__A : Optional[str] = field(
default=__UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__A : Optional[str] = field(
default=__UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__A : Optional[str] = field(
default=__UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
__A : bool = field(
default=__UpperCAmelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
__A : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__A : bool = field(
default=__UpperCAmelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''')
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''')
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout)] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
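# Usage sketch (hedged): driving the script programmatically by faking argv;
# every value below is an illustrative placeholder, not taken from the original.
#
#   import sys
#   sys.argv = [
#       "run_mlm_wwm.py",
#       "--model_name_or_path", "bert-base-chinese",
#       "--train_file", "train.txt",
#       "--train_ref_file", "train_ref.txt",  # one JSON list of wwm references per line
#       "--do_train",
#       "--output_dir", "/tmp/mlm-wwm",
#   ]
#   main()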
| 274
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
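# Usage sketch (hedged): resolving a concrete image processor through the auto
# class defined above. The checkpoint id is illustrative.
#
#   from PIL import Image
#
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   inputs = image_processor(images=Image.new("RGB", (224, 224)), return_tensors="pt")
#   print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])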
| 611
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
        GPT2Config,
        T5Config,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
        TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
        GPT2LMHeadModel,
RobertaForMaskedLM,
        T5ForConditionalGeneration,
)
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
@slow
def _snake_case ( self : List[Any] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
snake_case_ : List[Any] = AutoConfig.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
snake_case_ : str = TFAutoModel.from_pretrained(lowercase_ , from_pt=lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
snake_case_ : List[str] = AutoModel.from_pretrained(lowercase_ , from_tf=lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
@slow
def _snake_case ( self : str ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
snake_case_ : Optional[int] = AutoConfig.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
snake_case_ : Any = TFAutoModelForPreTraining.from_pretrained(lowercase_ , from_pt=lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
snake_case_ : List[Any] = AutoModelForPreTraining.from_pretrained(lowercase_ , from_tf=lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
@slow
def _snake_case ( self : Any ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Union[str, Any] = AutoConfig.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
snake_case_ : int = TFAutoModelForCausalLM.from_pretrained(lowercase_ , from_pt=lowercase_ )
snake_case_ : Tuple = TFAutoModelForCausalLM.from_pretrained(
lowercase_ , output_loading_info=lowercase_ , from_pt=lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
snake_case_ : int = AutoModelForCausalLM.from_pretrained(lowercase_ , from_tf=lowercase_ )
snake_case_ : List[str] = AutoModelForCausalLM.from_pretrained(
lowercase_ , output_loading_info=lowercase_ , from_tf=lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
@slow
def _snake_case ( self : Tuple ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Dict = AutoConfig.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
snake_case_ : Dict = TFAutoModelWithLMHead.from_pretrained(lowercase_ , from_pt=lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
snake_case_ : List[str] = AutoModelWithLMHead.from_pretrained(lowercase_ , from_tf=lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
@slow
def _snake_case ( self : Dict ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : List[Any] = AutoConfig.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
snake_case_ : Tuple = TFAutoModelForMaskedLM.from_pretrained(lowercase_ , from_pt=lowercase_ )
snake_case_ : List[Any] = TFAutoModelForMaskedLM.from_pretrained(
lowercase_ , output_loading_info=lowercase_ , from_pt=lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
snake_case_ : Tuple = AutoModelForMaskedLM.from_pretrained(lowercase_ , from_tf=lowercase_ )
snake_case_ : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(
lowercase_ , output_loading_info=lowercase_ , from_tf=lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)
@slow
def _snake_case ( self : Optional[Any] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
snake_case_ : Optional[int] = AutoConfig.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
snake_case_ : Any = TFAutoModelForSequenceClassification.from_pretrained(lowercase_ , from_pt=lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
snake_case_ : List[Any] = AutoModelForSequenceClassification.from_pretrained(lowercase_ , from_tf=lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
@slow
def _snake_case ( self : Optional[int] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
snake_case_ : Dict = AutoConfig.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
snake_case_ : int = TFAutoModelForQuestionAnswering.from_pretrained(lowercase_ , from_pt=lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
snake_case_ : int = AutoModelForQuestionAnswering.from_pretrained(lowercase_ , from_tf=lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
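# The cross-framework pattern these tests exercise, in isolation (hedged sketch;
# "bert-base-uncased" is the same checkpoint the tests use):
#
#   from transformers import AutoModel, TFAutoModel
#
#   tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)  # PT weights -> TF model
#   pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)    # TF weights -> PT model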
| 708
|
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared second norm of vector: sum(x * x for x in vector)"""
    return np.dot(vector, vector)


class SVC:
    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        """Linear kernel (as if no kernel was used at all)."""
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        """RBF (radial basis function) kernel; requires gamma > 0."""
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list, classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            # opposite of the dual objective, so that scipy's minimize maximizes it
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
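# Worked example (hedged sketch) on a linearly separable toy set, mirroring the
# classifier's intended use:
#
#   import numpy as np
#   xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]),
#         np.asarray([1.0, 1.0]), np.asarray([1.0, 2.0])]
#   ys = np.asarray([1, 1, -1, -1])
#   svc = SVC(kernel="linear")
#   svc.fit(xs, ys)
#   svc.predict(np.asarray([0.0, 1.5]))  # -> 1
#   svc.predict(np.asarray([1.0, 1.5]))  # -> -1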
| 485
| 0
|
"""simple docstring"""
import math
import os
import sys
def snake_case ( A__ ):
UpperCAmelCase_ : Tuple = ""
try:
with open(A__ ,"rb" ) as binary_file:
UpperCAmelCase_ : Tuple = binary_file.read()
for dat in data:
UpperCAmelCase_ : str = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def snake_case ( A__ ,A__ ,A__ ,A__ ):
lexicon.pop(A__ )
UpperCAmelCase_ : List[str] = last_match_id
if math.loga(A__ ).is_integer():
for curr_key in lexicon:
UpperCAmelCase_ : Optional[Any] = "0" + lexicon[curr_key]
UpperCAmelCase_ : Dict = bin(A__ )[2:]
def snake_case ( A__ ):
UpperCAmelCase_ : Tuple = {"0": "0", "1": "1"}
UpperCAmelCase_ , UpperCAmelCase_ : Any = "", ""
UpperCAmelCase_ : Dict = len(A__ )
for i in range(len(A__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCAmelCase_ : Optional[int] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(A__ ,A__ ,A__ ,A__ )
index += 1
UpperCAmelCase_ : List[str] = ""
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
UpperCAmelCase_ : Union[str, Any] = lexicon[curr_string]
result += last_match_id
return result
def snake_case ( A__ ,A__ ):
UpperCAmelCase_ : Dict = os.path.getsize(A__ )
UpperCAmelCase_ : int = bin(A__ )[2:]
UpperCAmelCase_ : Optional[int] = len(A__ )
return "0" * (length_length - 1) + file_length_binary + compressed
def snake_case ( A__ ,A__ ):
UpperCAmelCase_ : Optional[int] = 8
try:
with open(A__ ,"wb" ) as opened_file:
UpperCAmelCase_ : Optional[Any] = [
to_write[i : i + byte_length]
for i in range(0 ,len(A__ ) ,A__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(A__ ,2 ).to_bytes(1 ,byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def snake_case ( A__ ,A__ ):
UpperCAmelCase_ : List[Any] = read_file_binary(A__ )
UpperCAmelCase_ : List[str] = compress_data(A__ )
UpperCAmelCase_ : Tuple = add_file_length(A__ ,A__ )
write_file_binary(A__ ,A__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
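# Usage sketch (hedged): compressing a small file end to end; both paths are
# hypothetical placeholders.
#
#   with open("/tmp/sample.bin", "wb") as f:
#       f.write(b"abracadabra" * 100)
#   compress("/tmp/sample.bin", "/tmp/sample.lzw")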
| 95
|
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
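# How these helpers are typically called (hedged sketch; the version pins below
# are illustrative, not from the original):
#
#   require_version("numpy>=1.17")      # range check against the installed version
#   require_version("python>=3.7.0")    # special-cased: compared to sys.version_info
#   require_version_core("tokenizers")  # bare name: only checks the package is installed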
| 95
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
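# Usage sketch (hedged): instantiating the configuration with the defaults
# declared above.
#
#   config = CanineConfig()
#   config.downsampling_rate, config.num_hash_buckets  # -> (4, 16384)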
| 201
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
a_ :str =(
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
a_ :Tuple =(
{"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
if is_torch_available()
else {}
)
a_ :List[str] =True
a_ :str =False
a_ :Optional[int] =False
a_ :Tuple =False
def __a ( self : str ):
'''simple docstring'''
__a = ViTModelTester(self )
__a = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ , hidden_size=3_7 )
def __a ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def __a ( self : List[Any] ):
'''simple docstring'''
pass
def __a ( self : int ):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ , nn.Linear ) )
def __a ( self : int ):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(SCREAMING_SNAKE_CASE__ )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def __a ( self : List[str] ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE__ )
def __a ( self : Optional[Any] ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def __a ( self : List[str] ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = ViTModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def __lowercase ( ) -> int:
"""simple docstring"""
__a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __a ( self : str ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def __a ( self : Dict ):
'''simple docstring'''
__a = ViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ).to(SCREAMING_SNAKE_CASE__ )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
__a = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
__a = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
__a = torch.tensor([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
@slow
def __a ( self : int ):
'''simple docstring'''
__a = ViTModel.from_pretrained("""facebook/dino-vits8""" ).to(SCREAMING_SNAKE_CASE__ )
__a = ViTImageProcessor.from_pretrained("""facebook/dino-vits8""" , size=4_8_0 )
__a = prepare_img()
__a = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
__a = inputs.pixel_values.to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
__a = model(SCREAMING_SNAKE_CASE__ , interpolate_pos_encoding=SCREAMING_SNAKE_CASE__ )
# verify the logits
__a = torch.Size((1, 3_6_0_1, 3_8_4) )
self.assertEqual(outputs.last_hidden_state.shape , SCREAMING_SNAKE_CASE__ )
__a = torch.tensor(
[[4.2_3_4_0, 4.3_9_0_6, -6.6_6_9_2], [4.5_4_6_3, 1.8_9_2_8, -6.7_2_5_7], [4.4_4_2_9, 0.8_4_9_6, -5.8_5_8_5]] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __a ( self : Optional[Any] ):
'''simple docstring'''
__a = ViTModel.from_pretrained("""facebook/dino-vits8""" , torch_dtype=torch.floataa , device_map="""auto""" )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
__a = inputs.pixel_values.to(SCREAMING_SNAKE_CASE__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
__a = model(SCREAMING_SNAKE_CASE__ )
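# The end-to-end inference pattern the integration tests above exercise, in
# isolation (hedged sketch; checkpoint and image path are the ones the tests use):
#
#   import torch
#   from PIL import Image
#   from transformers import ViTForImageClassification, ViTImageProcessor
#
#   processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
#   image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#   with torch.no_grad():
#       logits = model(**processor(images=image, return_tensors="pt")).logits
#   print(model.config.id2label[int(logits.argmax(-1))])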
| 201
| 1
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 98
|
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path: str):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
"""token_embedder""": """embeddings""",
"""encoder_norm""": """layernorm""",
"""kernel""": """weight""",
""".out""": """.output""",
"""scale""": """weight""",
"""embedders_0.pos_embedding""": """row_embedder.weight""",
"""embedders_1.pos_embedding""": """column_embedder.weight""",
}
    DECODER_CONVERSION_MAPPING = {
"""query""": """attention.query""",
"""key""": """attention.key""",
"""value""": """attention.value""",
"""output.dense""": """output""",
"""encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""",
"""pre_self_attention_layer_norm""": """self_attention.layer_norm""",
"""pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""",
"""mlp.""": """mlp.DenseReluDense.""",
"""pre_mlp_layer_norm""": """mlp.layer_norm""",
"""self_attention.o""": """self_attention.attention.o""",
"""decoder.embeddings.embedding""": """decoder.embed_tokens.weight""",
"""decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""",
"""decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.logits_dense.weight""": """decoder.lm_head.weight""",
}
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")

            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        image_processor.max_patches = 4096
        image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
__A : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''')
parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''')
__A : Optional[Any] = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
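# Usage sketch (hedged): calling the converter directly; both paths are
# hypothetical placeholders.
#
#   convert_pix2struct_original_pytorch_checkpoint_to_hf(
#       "/tmp/pix2struct/t5x_checkpoint",  # T5x checkpoint directory
#       "/tmp/pix2struct/hf_model",        # output folder for weights + processor
#       use_large=False,
#   )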
| 499
| 0
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_UpperCamelCase = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["DPTFeatureExtractor"]
_UpperCamelCase = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
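# The block above uses transformers' `_LazyModule` so that heavy submodules are only imported on
# first attribute access. A stripped-down sketch of the same idea via PEP 562 module `__getattr__`
# (illustrative only; `_LAZY_ATTRS` and its contents are made up, this is not the transformers code):
import importlib

_LAZY_ATTRS = {"sqrt": "math"}  # attribute name -> module that actually provides it


def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")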
| 583
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
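# For example (illustrative): with an offset of 1,
#   replace_key_with_offset("poolformer.encoder.2.3.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
# returns "poolformer.encoder.block.1.3.output.conv1.weight".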
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1_000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1_000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="poolformer_s12",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 583
| 1
|
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"

stages = [ZERO2, ZERO3]


def parameterized_custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=False,
        )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=False,
        )

    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=True,
        )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=True,
        )

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results,
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(self, stage: str, model: str, eval_steps: int = 10, distributed: bool = True, fp16: bool = True, quality_checks: bool = True):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(self, stage: str, model_name: str, eval_steps: int = 10, num_train_epochs: int = 1, distributed: bool = True, fp16: bool = True):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 47
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
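# A minimal instantiation sketch (assumes a transformers version that ships `InformerConfig`;
# the parameter values below are illustrative):
#
#   config = InformerConfig(prediction_length=24, context_length=48, num_time_features=1)
#   print(config.d_model, config.attention_type)  # 64 prob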
| 47
| 1
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowerCamelCase = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 720
|
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
lowerCamelCase = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: list):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
lowerCamelCase = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
lowerCamelCase = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
lowerCamelCase = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
"https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
] , )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
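# Quick sanity check of the CER formula documented above, using a hand-rolled Levenshtein
# distance. This is an illustrative sketch, not this metric's implementation:
def _edit_distance(ref: str, hyp: str) -> int:
    # classic single-row dynamic-programming Levenshtein distance over characters
    dp = list(range(len(hyp) + 1))
    for i, r in enumerate(ref, start=1):
        prev, dp[0] = dp[0], i
        for j, h in enumerate(hyp, start=1):
            prev, dp[j] = dp[j], min(dp[j] + 1, dp[j - 1] + 1, prev + (r != h))
    return dp[-1]


# CER = (S + D + I) / N, where N is the number of reference characters:
# one substitution against a four-character reference gives 0.25
assert _edit_distance("abcd", "abed") / len("abcd") == 0.25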
| 207
| 0
|
def exchange_sort(numbers: list[int]) -> list[int]:
    """
    Uses exchange sort to sort a list of numbers.

    >>> exchange_sort([5, 4, 3, 2, 1])
    [1, 2, 3, 4, 5]
    """
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(exchange_sort(unsorted))
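    # Illustrative sanity check: the result should agree with Python's built-in sorted()
    import random

    sample = [random.randint(-100, 100) for _ in range(20)]
    assert exchange_sort(list(sample)) == sorted(sample)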
| 68
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = botoa.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name, PolicyName=f"{role_name}_policy_permission", PolicyDocument=json.dumps(policy_document, indent=2)
        )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
def _get_iam_role_arn(role_name):
    iam_client = botoa.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
| 68
| 1
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43

            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 431
|
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
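# Illustrative usage sketch (the file path and feature names below are hypothetical):
#
#   features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
#   dataset = generate_example_dataset("/tmp/dummy_bench.arrow", features, num_examples=10)
#   print(dataset)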
| 431
| 1
|
from __future__ import annotations
_lowerCamelCase : Optional[Any] = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as dictionary of adjacency lists; the
        source vertex has to be defined upon initialization."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Runs breadth first search from the source vertex, filling the parent map."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Returns the shortest path from the source vertex to the target vertex
        as a string of the form `source->...->target`, or raises ValueError if
        no path exists."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
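    # With the graph and source vertex above, the expected behavior is (illustrative):
    #   g.shortest_path("D") -> "G->C->A->B->D"
    #   g.shortest_path("G") -> "G"
    #   g.shortest_path("Foo") raises ValueError, since "Foo" is not in the graph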
| 403
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = {
'''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250_002,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class lowercase ( a ):
lowercase__ : Dict = """altclip"""
lowercase__ : Any = True
def __init__( self : int , _UpperCamelCase : Tuple=None , _UpperCamelCase : Any=None , _UpperCamelCase : Union[str, Any]=768 , _UpperCamelCase : int=2.6_5_9_2 , **_UpperCamelCase : int ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = kwargs.pop("text_config_dict" , _UpperCamelCase )
SCREAMING_SNAKE_CASE = kwargs.pop("vision_config_dict" , _UpperCamelCase )
super().__init__(**_UpperCamelCase )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
SCREAMING_SNAKE_CASE = {}
# This is the complete result when using `text_config_dict`.
SCREAMING_SNAKE_CASE = AltCLIPTextConfig(**_UpperCamelCase ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
SCREAMING_SNAKE_CASE = (
F"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
F"The value `text_config_dict[\"{key}\"]` will be used instead."
)
# If inferred from default argument values (just to be super careful)
else:
SCREAMING_SNAKE_CASE = (
F"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
F"value `text_config[\"{key}\"]` will be overriden."
)
logger.warning(_UpperCamelCase )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
SCREAMING_SNAKE_CASE = {}
# This is the complete result when using `vision_config_dict`.
SCREAMING_SNAKE_CASE = AltCLIPVisionConfig(**_UpperCamelCase ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
SCREAMING_SNAKE_CASE = {
str(_UpperCamelCase ): value for key, value in _vision_config_dict["id2label"].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
SCREAMING_SNAKE_CASE = (
F"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
F"values. The value `vision_config_dict[\"{key}\"]` will be used instead."
)
# If inferred from default argument values (just to be super careful)
else:
SCREAMING_SNAKE_CASE = (
F"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
F"The value `vision_config[\"{key}\"]` will be overriden."
)
logger.warning(_UpperCamelCase )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
SCREAMING_SNAKE_CASE = {}
logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values." )
if vision_config is None:
SCREAMING_SNAKE_CASE = {}
logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values." )
SCREAMING_SNAKE_CASE = AltCLIPTextConfig(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = AltCLIPVisionConfig(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = projection_dim
SCREAMING_SNAKE_CASE = logit_scale_init_value
SCREAMING_SNAKE_CASE = 1.0
@classmethod
def __snake_case( cls : Union[str, Any] , _UpperCamelCase : AltCLIPTextConfig , _UpperCamelCase : AltCLIPVisionConfig , **_UpperCamelCase : int ) -> Optional[Any]:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE = self.text_config.to_dict()
SCREAMING_SNAKE_CASE = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
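# Serialization note: to_dict() deep-copies the instance dict, replaces the nested
# text/vision configs with plain dicts, and records the class-level model_type.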
| 403
| 1
|
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
"""simple docstring"""
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> bool:
"""simple docstring"""
A__ = 0
A__ = number
while duplicate > 0:
A__ , A__ = divmod(lowercase_ , 10 )
fact_sum += factorial(lowercase_ )
return fact_sum == number
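# Worked example (illustrative): 145 is a Krishnamurthy number because
# 1! + 4! + 5! = 1 + 24 + 120 = 145; 40585 is another (24 + 1 + 120 + 40320 + 120).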
if __name__ == "__main__":
print("""Program to check whether a number is a Krisnamurthy Number or not.""")
_lowerCamelCase : Optional[int] = int(input("""Enter number: """).strip())
print(
F'''{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.'''
)
| 177
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : Dict = {
"""xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""",
"""xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""",
"""xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""",
"""xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""",
"""xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""",
"""xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""",
"""xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""",
"""xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""",
}
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''xlm'''
UpperCAmelCase__ = {
'''hidden_size''': '''emb_dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
'''n_words''': '''vocab_size''', # For backward compatibility
}
def __init__( self : Tuple , UpperCAmelCase__ : Optional[Any]=30_145 , UpperCAmelCase__ : List[str]=2_048 , UpperCAmelCase__ : str=12 , UpperCAmelCase__ : List[Any]=16 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Union[str, Any]=512 , UpperCAmelCase__ : List[str]=2_048**-0.5 , UpperCAmelCase__ : List[Any]=1e-12 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : str=0 , UpperCAmelCase__ : int=1 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : List[Any]=3 , UpperCAmelCase__ : int=5 , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Tuple="first" , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Any=5 , UpperCAmelCase__ : List[Any]=5 , UpperCAmelCase__ : int=0 , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : str=0 , **UpperCAmelCase__ : List[str] , ) ->str:
'''simple docstring'''
A__ = vocab_size
A__ = emb_dim
A__ = n_layers
A__ = n_heads
A__ = dropout
A__ = attention_dropout
A__ = gelu_activation
A__ = sinusoidal_embeddings
A__ = causal
A__ = asm
A__ = n_langs
A__ = use_lang_emb
A__ = layer_norm_eps
A__ = bos_index
A__ = eos_index
A__ = pad_index
A__ = unk_index
A__ = mask_index
A__ = is_encoder
A__ = max_position_embeddings
A__ = embed_init_std
A__ = init_std
A__ = summary_type
A__ = summary_use_proj
A__ = summary_activation
A__ = summary_proj_to_labels
A__ = summary_first_dropout
A__ = start_n_top
A__ = end_n_top
A__ = mask_token_id
A__ = lang_id
if "n_words" in kwargs:
A__ = kwargs['''n_words''']
super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , **UpperCAmelCase__)
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE ( self : Dict) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
A__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
A__ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
])
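# In other words, for the default task the exported ONNX graph declares input_ids,
# attention_mask and token_type_ids, each with dynamic batch and sequence axes.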
| 177
| 1
|
def UpperCAmelCase__ ( lowerCamelCase_ : int , lowerCamelCase_ : int ):
while a != 0:
__a , __a : Dict = b % a, a
return b
def UpperCAmelCase__ ( lowerCamelCase_ : int , lowerCamelCase_ : int ):
if gcd(lowerCamelCase_ , lowerCamelCase_ ) != 1:
__a : int = f'''mod inverse of {a!r} and {m!r} does not exist'''
raise ValueError(lowerCamelCase_ )
__a , __a , __a : Dict = 1, 0, a
__a , __a , __a : List[str] = 0, 1, m
while va != 0:
__a : Union[str, Any] = ua // va
__a , __a , __a , __a , __a , __a : List[str] = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
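# Illustrative check: for a=3, m=11 the extended-Euclidean loop above yields 4,
# since (3 * 4) % 11 == 1; for a=4, m=8, gcd(4, 8) != 1 and a ValueError is raised.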
| 47
|
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
snake_case : List[str] = [8, 5, 9, 7]
snake_case : int = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
snake_case : Optional[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class lowerCamelCase__:
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
"""simple docstring"""
__lowercase = claim_vector
__lowercase = allocated_resources_table
__lowercase = maximum_claim_table
def __magic_name__ ( self ):
"""simple docstring"""
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __magic_name__ ( self ):
"""simple docstring"""
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __magic_name__ ( self ):
"""simple docstring"""
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__UpperCAmelCase ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __magic_name__ ( self ):
"""simple docstring"""
return {self.__need().index(__UpperCAmelCase ): i for i in self.__need()}
def __magic_name__ ( self , **__UpperCAmelCase ):
"""simple docstring"""
__lowercase = self.__need()
__lowercase = self.__allocated_resources_table
__lowercase = self.__available_resources()
__lowercase = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("""_""" * 5_0 + """\n""" )
while need_list:
__lowercase = False
for each_need in need_list:
__lowercase = True
for index, need in enumerate(__UpperCAmelCase ):
if need > available_resources[index]:
__lowercase = False
break
if execution:
__lowercase = True
# recover the original index of the process from the need-index manager
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__lowercase = original_need_index
print(F'''Process {process_number + 1} is executing.''' )
# remove the process run from stack
need_list.remove(__UpperCAmelCase )
# update available/freed resources stack
__lowercase = np.array(__UpperCAmelCase ) + np.array(
alloc_resources_table[process_number] )
print(
"""Updated available resource stack for processes: """
+ """ """.join([str(__UpperCAmelCase ) for x in available_resources] ) )
break
if safe:
print("""The process is in a safe state.\n""" )
else:
print("""System in unsafe state. Aborting...\n""" )
break
def __magic_name__ ( self ):
"""simple docstring"""
print(""" """ * 9 + """Allocated Resource Table""" )
for item in self.__allocated_resources_table:
print(
F'''P{self.__allocated_resources_table.index(__UpperCAmelCase ) + 1}'''
+ """ """.join(F'''{it:>8}''' for it in item )
+ """\n""" )
print(""" """ * 9 + """System Resource Table""" )
for item in self.__maximum_claim_table:
print(
F'''P{self.__maximum_claim_table.index(__UpperCAmelCase ) + 1}'''
+ """ """.join(F'''{it:>8}''' for it in item )
+ """\n""" )
print(
"""Current Usage by Active Processes: """
+ """ """.join(str(__UpperCAmelCase ) for x in self.__claim_vector ) )
print(
"""Initial Available Resources: """
+ """ """.join(str(__UpperCAmelCase ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
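# Illustrative usage (assuming the upstream, un-renamed identifiers): construct the
# class with the three module-level tables and run the safety check, e.g.
# BankersAlgorithm(claim_vector, allocated_resources_table, maximum_claim_table).main(describe=True)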
| 566
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case : Union[str, Any] = logging.get_logger(__name__)
__snake_case : str = {"""vocab_file""": """spm_char.model"""}
__snake_case : Tuple = {
"""vocab_file""": {
"""microsoft/speecht5_asr""": """https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model""",
"""microsoft/speecht5_tts""": """https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model""",
"""microsoft/speecht5_vc""": """https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model""",
}
}
__snake_case : str = {
"""microsoft/speecht5_asr""": 10_24,
"""microsoft/speecht5_tts""": 10_24,
"""microsoft/speecht5_vc""": 10_24,
}
class __SCREAMING_SNAKE_CASE ( __lowercase):
_SCREAMING_SNAKE_CASE : Any = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : List[Any] = ['''input_ids''', '''attention_mask''']
def __init__( self , _UpperCamelCase , _UpperCamelCase="<s>" , _UpperCamelCase="</s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase = None , **_UpperCamelCase , ):
"""simple docstring"""
lowerCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )
lowerCAmelCase__ = vocab_file
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCamelCase )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.sp_model.get_piece_size()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase__ = self.__dict__.copy()
lowerCAmelCase__ = None
return state
def __setstate__( self , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase__ = {}
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase__ ( self , _UpperCamelCase ):
"""simple docstring"""
return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
def UpperCamelCase__ ( self , _UpperCamelCase ):
"""simple docstring"""
return self.sp_model.piece_to_id(_UpperCamelCase )
def UpperCamelCase__ ( self , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = self.sp_model.IdToPiece(_UpperCamelCase )
return token
def UpperCamelCase__ ( self , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = []
lowerCAmelCase__ = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_UpperCamelCase ) + token
lowerCAmelCase__ = []
else:
current_sub_tokens.append(_UpperCamelCase )
out_string += self.sp_model.decode(_UpperCamelCase )
return out_string.strip()
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase=None ):
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
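# e.g. a single sequence [5, 9] becomes [5, 9, eos]; for a pair, both sequences are
# concatenated and a single eos is appended.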
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
lowerCAmelCase__ = [1]
if token_ids_a is None:
return ([0] * len(_UpperCamelCase )) + suffix_ones
return ([0] * len(_UpperCamelCase )) + ([0] * len(_UpperCamelCase )) + suffix_ones
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase = None ):
"""simple docstring"""
if not os.path.isdir(_UpperCamelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCAmelCase__ = os.path.join(
_UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , 'wb' ) as fi:
lowerCAmelCase__ = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,)
| 365
|
from __future__ import annotations
import pandas as pd
def _UpperCamelCase ( UpperCamelCase_ : list[int] , UpperCamelCase_ : list[int] , UpperCamelCase_ : int ) -> list[int]:
"""simple docstring"""
lowerCAmelCase__ = [0] * no_of_processes
lowerCAmelCase__ = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(UpperCamelCase_ ):
lowerCAmelCase__ = burst_time[i]
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = 9_9999_9999
lowerCAmelCase__ = 0
lowerCAmelCase__ = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(UpperCamelCase_ ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
lowerCAmelCase__ = remaining_time[j]
lowerCAmelCase__ = j
lowerCAmelCase__ = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
lowerCAmelCase__ = remaining_time[short]
if minm == 0:
lowerCAmelCase__ = 9_9999_9999
if remaining_time[short] == 0:
complete += 1
lowerCAmelCase__ = False
# Find finish time of current process
lowerCAmelCase__ = increment_time + 1
# Calculate waiting time
lowerCAmelCase__ = finish_time - arrival_time[short]
lowerCAmelCase__ = finar - burst_time[short]
if waiting_time[short] < 0:
lowerCAmelCase__ = 0
# Increment time
increment_time += 1
return waiting_time
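# Worked example (illustrative): arrival_time=[0, 1, 2], burst_time=[3, 1, 2] gives
# waiting_time=[1, 0, 2]: P1 is preempted at t=1 by the shorter P2, resumes at t=2
# and finishes at t=4, and P3 runs last, finishing at t=6.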
def _UpperCamelCase ( UpperCamelCase_ : list[int] , UpperCamelCase_ : int , UpperCamelCase_ : list[int] ) -> list[int]:
"""simple docstring"""
lowerCAmelCase__ = [0] * no_of_processes
for i in range(UpperCamelCase_ ):
lowerCAmelCase__ = burst_time[i] + waiting_time[i]
return turn_around_time
def _UpperCamelCase ( UpperCamelCase_ : list[int] , UpperCamelCase_ : list[int] , UpperCamelCase_ : int ) -> None:
"""simple docstring"""
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
for i in range(UpperCamelCase_ ):
lowerCAmelCase__ = total_waiting_time + waiting_time[i]
lowerCAmelCase__ = total_turn_around_time + turn_around_time[i]
print(F"Average waiting time = {total_waiting_time / no_of_processes:.5f}" )
print('Average turn around time =' , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print("""Enter how many process you want to analyze""")
__snake_case : Dict = int(input())
__snake_case : List[Any] = [0] * no_of_processes
__snake_case : str = [0] * no_of_processes
__snake_case : Optional[int] = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("""Enter the arrival time and burst time for process:--""" + str(i + 1))
__snake_case , __snake_case : Any = map(int, input().split())
__snake_case : str = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__snake_case : Dict = burst_time
__snake_case : Optional[int] = no_of_processes
__snake_case : str = waiting_time
__snake_case : int = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
__snake_case : Dict = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"""Process""",
"""BurstTime""",
"""ArrivalTime""",
"""WaitingTime""",
"""TurnAroundTime""",
],
)
# Print the DataFrame
pd.set_option("""display.max_rows""", fcfs.shape[0] + 1)
print(fcfs)
| 365
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase_ ( a ):
'''simple docstring'''
__lowerCAmelCase : Dict = ["image_processor", "tokenizer"]
__lowerCAmelCase : List[Any] = "BlipImageProcessor"
__lowerCAmelCase : Optional[int] = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , a_ , a_ ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = False
super().__init__(a_ , a_ )
UpperCAmelCase = self.image_processor
def __call__( self , a_ = None , a_ = None , a_ = True , a_ = False , a_ = None , a_ = None , a_ = 0 , a_ = None , a_ = None , a_ = False , a_ = False , a_ = False , a_ = False , a_ = False , a_ = True , a_ = None , **a_ , ) -> BatchEncoding:
"""simple docstring"""
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
UpperCAmelCase = self.tokenizer
UpperCAmelCase = self.tokenizer(
text=a_ , add_special_tokens=a_ , padding=a_ , truncation=a_ , max_length=a_ , stride=a_ , pad_to_multiple_of=a_ , return_attention_mask=a_ , return_overflowing_tokens=a_ , return_special_tokens_mask=a_ , return_offsets_mapping=a_ , return_token_type_ids=a_ , return_length=a_ , verbose=a_ , return_tensors=a_ , **a_ , )
return text_encoding
# add pixel_values
UpperCAmelCase = self.image_processor(a_ , return_tensors=a_ )
if text is not None:
UpperCAmelCase = self.tokenizer(
text=a_ , add_special_tokens=a_ , padding=a_ , truncation=a_ , max_length=a_ , stride=a_ , pad_to_multiple_of=a_ , return_attention_mask=a_ , return_overflowing_tokens=a_ , return_special_tokens_mask=a_ , return_offsets_mapping=a_ , return_token_type_ids=a_ , return_length=a_ , verbose=a_ , return_tensors=a_ , **a_ , )
else:
UpperCAmelCase = None
if text_encoding is not None:
encoding_image_processor.update(a_ )
return encoding_image_processor
def snake_case_ ( self , *a_ , **a_ ) -> List[str]:
"""simple docstring"""
return self.tokenizer.batch_decode(*a_ , **a_ )
def snake_case_ ( self , *a_ , **a_ ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*a_ , **a_ )
@property
def snake_case_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
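# Illustrative usage (assuming upstream names): a BlipProcessor called with only
# images returns pixel_values, with only text it behaves like the wrapped tokenizer,
# and with both it merges the text encoding into the image encoding, as coded above.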
| 447
|
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class lowercase_ :
'''simple docstring'''
@staticmethod
def snake_case_ ( *a_ , **a_ ) -> Tuple:
"""simple docstring"""
pass
def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : Image ):
UpperCAmelCase = hashlib.mda(image.tobytes() )
return m.hexdigest()[:10]
def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : Image ):
UpperCAmelCase = np.array(SCREAMING_SNAKE_CASE )
UpperCAmelCase = npimg.shape
return {"hash": hashimage(SCREAMING_SNAKE_CASE ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase : Tuple = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
__lowerCAmelCase : Tuple = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def snake_case_ ( self , a_ , a_ , a_ ) -> Dict:
"""simple docstring"""
UpperCAmelCase = MaskGenerationPipeline(model=a_ , image_processor=a_ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def snake_case_ ( self , a_ , a_ ) -> Optional[Any]:
"""simple docstring"""
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@slow
@require_torch
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = pipeline('mask-generation' , model='facebook/sam-vit-huge' )
UpperCAmelCase = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=2_5_6 )
# Shortening by hashing
UpperCAmelCase = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(a_ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.021},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0053},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9967},
{'mask': {'hash': '453c7844bd', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.993},
{'mask': {'hash': '3d44f2926d', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9909},
{'mask': {'hash': '64033ddc3f', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9879},
{'mask': {'hash': '801064ff79', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9834},
{'mask': {'hash': '6172f276ef', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9716},
{'mask': {'hash': 'b49e60e084', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9612},
{'mask': {'hash': 'a811e775fd', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9599},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9552},
{'mask': {'hash': '9d8257e080', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9532},
{'mask': {'hash': '32de6454a8', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9516},
{'mask': {'hash': 'af3d4af2c8', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9499},
{'mask': {'hash': '3c6db475fb', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9483},
{'mask': {'hash': 'c290813fb9', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9464},
{'mask': {'hash': 'b6f0b8f606', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.943},
{'mask': {'hash': '92ce16bfdf', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.943},
{'mask': {'hash': 'c749b25868', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9408},
{'mask': {'hash': 'efb6cab859', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9335},
{'mask': {'hash': '1ff2eafb30', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9326},
{'mask': {'hash': '788b798e24', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9262},
{'mask': {'hash': 'abea804f0e', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.8999},
{'mask': {'hash': '7b9e8ddb73', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.8986},
{'mask': {'hash': 'cd24047c8a', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.8984},
{'mask': {'hash': '6943e6bcbd', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.8873},
{'mask': {'hash': 'b5f47c9191', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.8871}
] , )
# fmt: on
@require_torch
@slow
def snake_case_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase = 'facebook/sam-vit-huge'
UpperCAmelCase = pipeline('mask-generation' , model=a_ )
UpperCAmelCase = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=2_5_6 )
# Shortening by hashing
UpperCAmelCase = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(a_ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0210},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0053},
] , )
| 447
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase__ = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 640
|
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
UpperCamelCase__ = 'scheduler_config.json'
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: int = 1
__lowerCamelCase: List[Any] = 2
__lowerCamelCase: Optional[Any] = 3
__lowerCamelCase: int = 4
__lowerCamelCase: Optional[int] = 5
@dataclass
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: jnp.ndarray
class _UpperCAmelCase :
__lowerCamelCase: List[str] = SCHEDULER_CONFIG_NAME
__lowerCamelCase: Optional[int] = ['dtype']
__lowerCamelCase: int = []
__lowerCamelCase: Dict = True
@classmethod
def lowerCAmelCase__ ( cls : Tuple , a : Dict[str, Any] = None , a : Optional[str] = None , a : Union[str, Any]=False , **a : Union[str, Any] , ):
'''simple docstring'''
lowercase_ , lowercase_ : Any = cls.load_config(
pretrained_model_name_or_path=a , subfolder=a , return_unused_kwargs=a , **a , )
lowercase_ , lowercase_ : Union[str, Any] = cls.from_config(a , return_unused_kwargs=a , **a )
if hasattr(a , "create_state" ) and getattr(a , "has_state" , a ):
lowercase_ : Tuple = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def lowerCAmelCase__ ( self : int , a : Union[str, os.PathLike] , a : bool = False , **a : int ):
'''simple docstring'''
self.save_config(save_directory=a , push_to_hub=a , **a )
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
return self._get_compatibles()
@classmethod
def lowerCAmelCase__ ( cls : Dict ):
'''simple docstring'''
lowercase_ : str = list(set([cls.__name__] + cls._compatibles ) )
lowercase_ : str = importlib.import_module(__name__.split("." )[0] )
lowercase_ : Optional[Any] = [
getattr(a , a ) for c in compatible_classes_str if hasattr(a , a )
]
return compatible_classes
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
assert len(_UpperCamelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(_UpperCamelCase ) - x.ndim) ) , _UpperCamelCase )
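# e.g. x of shape (B,) checked against samples of shape (B, C, H, W) is reshaped to
# (B, 1, 1, 1) and then broadcast to (B, C, H, W).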
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=0.999 , _UpperCamelCase=jnp.floataa ):
"""simple docstring"""
def alpha_bar(_UpperCamelCase ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
lowercase_ : int = []
for i in range(_UpperCamelCase ):
lowercase_ : Union[str, Any] = i / num_diffusion_timesteps
lowercase_ : Dict = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_UpperCamelCase ) / alpha_bar(_UpperCamelCase ) , _UpperCamelCase ) )
return jnp.array(_UpperCamelCase , dtype=_UpperCamelCase )
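# Sanity check (illustrative): this Glide-style cosine schedule returns
# num_diffusion_timesteps betas, each clipped to at most the max_beta default of 0.999,
# derived from alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2.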
@flax.struct.dataclass
class _UpperCAmelCase :
__lowerCamelCase: jnp.ndarray
__lowerCamelCase: jnp.ndarray
__lowerCamelCase: jnp.ndarray
@classmethod
def lowerCAmelCase__ ( cls : Tuple , a : Optional[int] ):
'''simple docstring'''
lowercase_ : Any = scheduler.config
if config.trained_betas is not None:
lowercase_ : Union[str, Any] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
lowercase_ : List[Any] = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowercase_ : Tuple = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowercase_ : Union[str, Any] = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" )
lowercase_ : str = 1.0 - betas
lowercase_ : Dict = jnp.cumprod(a , axis=0 )
return cls(
alphas=a , betas=a , alphas_cumprod=a , )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = state.alphas_cumprod
lowercase_ : Optional[Any] = alphas_cumprod[timesteps] ** 0.5
lowercase_ : int = sqrt_alpha_prod.flatten()
lowercase_ : Union[str, Any] = broadcast_to_shape_from_left(_UpperCamelCase , original_samples.shape )
lowercase_ : Optional[int] = (1 - alphas_cumprod[timesteps]) ** 0.5
lowercase_ : Union[str, Any] = sqrt_one_minus_alpha_prod.flatten()
lowercase_ : Any = broadcast_to_shape_from_left(_UpperCamelCase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ , lowercase_ : int = get_sqrt_alpha_prod(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : str = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
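# This is the DDPM forward process q(x_t | x_0):
# x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise.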
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ , lowercase_ : int = get_sqrt_alpha_prod(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : Any = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
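# This computes the v-prediction target (Salimans & Ho, 2022):
# v = sqrt(alpha_bar_t) * noise - sqrt(1 - alpha_bar_t) * sample.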
| 640
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 600
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __UpperCamelCase ( lowercase__ : str ) -> None:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ : str = analyze_text(lowercase__ )
lowerCAmelCase_ : Optional[int] = list(""" """ + ascii_lowercase )
# total number of single characters; used to normalize counts into probabilities.
lowerCAmelCase_ : Any = sum(single_char_strings.values() )
# entropy over one-character strings
lowerCAmelCase_ : Optional[Any] = 0
# for each letter of the alphabet, accumulate its entropy contribution if it occurs
for ch in my_alphas:
if ch in single_char_strings:
lowerCAmelCase_ : Union[str, Any] = single_char_strings[ch]
lowerCAmelCase_ : Tuple = my_str / all_sum
my_fir_sum += prob * math.loga(lowercase__ ) # entropy formula.
# print entropy
print(f'{round(-1 * my_fir_sum ):.1f}' )
# two-character strings
lowerCAmelCase_ : Any = sum(two_char_strings.values() )
lowerCAmelCase_ : List[str] = 0
# for each two-character sequence, accumulate its entropy contribution.
for cha in my_alphas:
for cha in my_alphas:
lowerCAmelCase_ : List[str] = cha + cha
if sequence in two_char_strings:
lowerCAmelCase_ : Any = two_char_strings[sequence]
lowerCAmelCase_ : Optional[Any] = int(lowercase__ ) / all_sum
my_sec_sum += prob * math.loga(lowercase__ )
# print second entropy
print(f'{round(-1 * my_sec_sum ):.1f}' )
# print the difference between them
print(f'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
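# Intuition (illustrative): H = -sum(p * log2(p)); a text of one repeated character
# gives 0 bits, while a uniform distribution over the 27 symbols (space + a-z)
# gives log2(27), roughly 4.75 bits.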
def __UpperCamelCase ( lowercase__ : str ) -> tuple[dict, dict]:
'''simple docstring'''
lowerCAmelCase_ : Any = Counter() # type: ignore
lowerCAmelCase_ : Any = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(lowercase__ ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __UpperCamelCase ( ) -> List[str]:
'''simple docstring'''
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 600
| 1
|
"""simple docstring"""
from __future__ import annotations
class _UpperCAmelCase :
def __init__( self , lowercase_=None ) -> Dict:
UpperCAmelCase = data
UpperCAmelCase = None
def __repr__( self ) -> Optional[Any]:
UpperCAmelCase = []
UpperCAmelCase = self
while temp:
string_rep.append(F"{temp.data}" )
UpperCAmelCase = temp.next
return "->".join(lowercase_ )
def lowercase__ ( lowerCAmelCase : list ) -> List[Any]:
"""simple docstring"""
if not elements_list:
raise Exception('The Elements List is empty' )
UpperCAmelCase = UpperCAmelCase = Node(elements_list[0] )
for i in range(1 , len(lowerCAmelCase ) ):
UpperCAmelCase = Node(elements_list[i] )
UpperCAmelCase = current.next
return head
def lowercase__ ( lowerCAmelCase : Node ) -> None:
"""simple docstring"""
if head_node is not None and isinstance(lowerCAmelCase , lowerCAmelCase ):
print_reverse(head_node.next )
print(head_node.data )
def lowercase__ ( ) -> Optional[int]:
"""simple docstring"""
from doctest import testmod
testmod()
UpperCAmelCase = make_linked_list([14, 52, 14, 12, 43] )
print('Linked List:' )
print(lowerCAmelCase )
print('Elements in Reverse:' )
print_reverse(lowerCAmelCase )
if __name__ == "__main__":
main()
| 183
|
"""simple docstring"""
def lowercase__ ( lowerCAmelCase : int , lowerCAmelCase : int ) -> int:
"""simple docstring"""
return number | (1 << position)
def lowercase__ ( lowerCAmelCase : int , lowerCAmelCase : int ) -> int:
"""simple docstring"""
return number & ~(1 << position)
def lowercase__ ( lowerCAmelCase : int , lowerCAmelCase : int ) -> int:
"""simple docstring"""
return number ^ (1 << position)
def lowercase__ ( lowerCAmelCase : int , lowerCAmelCase : int ) -> bool:
"""simple docstring"""
return ((number >> position) & 1) == 1
def lowercase__ ( lowerCAmelCase : int , lowerCAmelCase : int ) -> int:
"""simple docstring"""
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 183
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ : List[str] ={'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Tuple =['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Dict =['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Optional[Any] =[
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : str =[
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : int =[
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : Tuple =_LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 148
|
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
lowerCAmelCase__ : Tuple =logging.get_logger(__name__)
lowerCAmelCase__ : Dict[Optional[str], Type[Formatter]] ={}
lowerCAmelCase__ : Dict[Optional[str], str] ={}
lowerCAmelCase__ : Dict[Optional[str], Exception] ={}
def __lowercase ( a__ , a__ , a__ = None , ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f"""Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})""" )
__SCREAMING_SNAKE_CASE = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f"""Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})""" )
__SCREAMING_SNAKE_CASE = format_type
def __lowercase ( a__ , a__ , a__ = None ) -> List[str]:
__SCREAMING_SNAKE_CASE = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
__SCREAMING_SNAKE_CASE = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
lowerCAmelCase__ : List[Any] =ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
lowerCAmelCase__ : Optional[Any] =ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
lowerCAmelCase__ : Optional[int] =ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def __lowercase ( a__ ) -> Optional[str]:
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def __lowercase ( a__ , **a__ ) -> Formatter:
__SCREAMING_SNAKE_CASE = get_format_type_from_alias(a__ )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**a__ )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f"""Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'""" )
| 148
| 1
|
'''simple docstring'''
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
lowerCAmelCase__ : Dict = int(snake_case__ )
# Initialize Result
lowerCAmelCase__ : List[Any] = []
# Traverse through all denomination
for denomination in reversed(snake_case__ ):
# Find denominations
while int(snake_case__ ) >= int(snake_case__ ):
total_value -= int(snake_case__ )
answer.append(snake_case__ ) # Append the "answers" array
return answer
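# Worked example (illustrative): with denominations [1, 2, 5, 10, 20, 50, 100, 500, 2000],
# find_minimum_change(denominations, 987) greedily returns
# [500, 100, 100, 100, 100, 50, 20, 10, 5, 2].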
# Driver Code
if __name__ == "__main__":
snake_case = []
snake_case = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
snake_case = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(f'Denomination {i}: ').strip()))
snake_case = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
snake_case = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
snake_case = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(f'Following is minimal change for {value}: ')
snake_case = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 700
|
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def UpperCAmelCase_ ( ):
"""simple docstring"""
lowerCAmelCase__ : int = torch.nn.Linear(2 , 4 )
lowerCAmelCase__ : List[str] = torch.optim.AdamW(model.parameters() , lr=1.0 )
lowerCAmelCase__ : List[Any] = torch.optim.lr_scheduler.OneCycleLR(lowerCamelCase_ , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
lowerCAmelCase__ : List[Any] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
lowerCAmelCase__ : int = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(lowerCamelCase_ )
class lowerCAmelCase ( UpperCamelCase_ ):
@require_cuda
def _A ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(a__ ):
lowerCAmelCase__ : int = Accelerator(cpu=a__ )
def _A ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = Accelerator()
lowerCAmelCase__ : Union[str, Any] = GradientState()
assert state.num_steps == 1
lowerCAmelCase__ : Any = 4
assert state.num_steps == 4
assert state.sync_gradients is True
lowerCAmelCase__ : Tuple = False
assert state.sync_gradients is False
GradientState._reset_state()
def _A ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = Accelerator()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = create_components()
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) : Dict = accelerator.prepare(a__ , a__ , a__ , a__ , a__ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def _A ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = Accelerator()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = create_components()
accelerator.prepare(a__ , a__ , a__ , a__ , a__ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def _A ( self : Dict ):
'''simple docstring'''
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*a__ : Optional[int] , **a__ : List[Any] ):
pass
with patch("torch.cuda.set_device" , a__ ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ):
lowerCAmelCase__ : Any = Accelerator()
self.assertEqual(str(accelerator.state.device ) , "cuda:64" )
def _A ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = Accelerator()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = create_components()
accelerator.prepare(a__ , a__ , a__ , a__ , a__ )
lowerCAmelCase__ : int = get_signature(a__ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a__ )
# make sure random weights don't match
load_random_weights(a__ )
self.assertTrue(abs(model_signature - get_signature(a__ ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(a__ )
self.assertTrue(abs(model_signature - get_signature(a__ ) ) < 1e-3 )
def _A ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = Accelerator()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = create_components()
accelerator.prepare(a__ , a__ , a__ , a__ , a__ )
lowerCAmelCase__ : List[str] = get_signature(a__ )
# saving hook
def save_config(a__ : List[Any] , a__ : Tuple , a__ : List[Any] ):
lowerCAmelCase__ : Any = {"class_name": models[0].__class__.__name__}
with open(os.path.join(a__ , "data.json" ) , "w" ) as f:
json.dump(a__ , a__ )
# loading hook
def load_config(a__ : Any , a__ : Tuple ):
with open(os.path.join(a__ , "data.json" ) , "r" ) as f:
lowerCAmelCase__ : str = json.load(a__ )
lowerCAmelCase__ : Tuple = config["class_name"]
lowerCAmelCase__ : List[Any] = accelerator.register_save_state_pre_hook(a__ )
lowerCAmelCase__ : Optional[Any] = accelerator.register_load_state_pre_hook(a__ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a__ )
# make sure random weights don't match with hooks
load_random_weights(a__ )
self.assertTrue(abs(model_signature - get_signature(a__ ) ) > 1e-3 )
# random class name to verify correct one is loaded
lowerCAmelCase__ : Tuple = "random"
# make sure loaded weights match with hooks
accelerator.load_state(a__ )
self.assertTrue(abs(model_signature - get_signature(a__ ) ) < 1e-3 )
# model.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a__ )
# make sure random weights don't match with hooks removed
load_random_weights(a__ )
self.assertTrue(abs(model_signature - get_signature(a__ ) ) > 1e-3 )
# random class name to verify correct one is loaded
lowerCAmelCase__ : Union[str, Any] = "random"
# make sure loaded weights match with hooks removed
accelerator.load_state(a__ )
self.assertTrue(abs(model_signature - get_signature(a__ ) ) < 1e-3 )
# model.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def _A ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = Accelerator()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = create_components()
lowerCAmelCase__ : str = None
# This should work
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = accelerator.prepare(
a__ , a__ , a__ , a__ , a__ , a__ )
self.assertTrue(dummy_obj is None )
def _A ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = Accelerator()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = create_components()
lowerCAmelCase__ : Dict = [1, 2, 3]
# This should work
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Tuple = accelerator.prepare(
a__ , a__ , a__ , a__ , a__ , a__ )
self.assertEqual(
getattr(a__ , "_is_accelerate_prepared" , a__ ) , a__ , "Dummy object should have `_is_accelerate_prepared` set to `True`" , )
self.assertEqual(
getattr(a__ , "_is_accelerate_prepared" , a__ ) , a__ , "Model is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(a__ , "_is_accelerate_prepared" , a__ ) , a__ , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(a__ , "_is_accelerate_prepared" , a__ ) , a__ , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(a__ , "_is_accelerate_prepared" , a__ ) , a__ , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(a__ , "_is_accelerate_prepared" , a__ ) , a__ , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
@slow
@require_bnb
def _A ( self : List[str] ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
lowerCAmelCase__ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=a__ , device_map={"": 0} , )
lowerCAmelCase__ : Optional[Any] = Accelerator()
# This should work
lowerCAmelCase__ : List[Any] = accelerator.prepare(a__ )
@slow
@require_bnb
def _A ( self : Optional[int] ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
lowerCAmelCase__ : Optional[Any] = Accelerator()
with init_empty_weights():
lowerCAmelCase__ : Any = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
lowerCAmelCase__ : int = infer_auto_device_map(a__ )
lowerCAmelCase__ : str = "cpu"
lowerCAmelCase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , device_map=a__ , load_in_abit=a__ , llm_inta_enable_fpaa_cpu_offload=a__ )
# This should not work and get value error
with self.assertRaises(a__ ):
lowerCAmelCase__ : Tuple = accelerator.prepare(a__ )
@slow
@require_bnb
@require_multi_gpu
def _A ( self : Optional[int] ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
lowerCAmelCase__ : Optional[Any] = {"distributed_type": DistributedType.MULTI_GPU}
with init_empty_weights():
lowerCAmelCase__ : str = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
lowerCAmelCase__ : List[Any] = infer_auto_device_map(a__ )
lowerCAmelCase__ : Dict = 1
lowerCAmelCase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=a__ , device_map=a__ , )
lowerCAmelCase__ : Dict = Accelerator()
# This should not work and get value error
with self.assertRaises(a__ ):
lowerCAmelCase__ : List[Any] = accelerator.prepare(a__ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def _A ( self : Dict ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
with init_empty_weights():
lowerCAmelCase__ : int = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
lowerCAmelCase__ : Union[str, Any] = infer_auto_device_map(a__ )
lowerCAmelCase__ : Any = 1
lowerCAmelCase__ : Optional[int] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=a__ , device_map=a__ , )
lowerCAmelCase__ : int = Accelerator()
# This should work
lowerCAmelCase__ : int = accelerator.prepare(a__ )
@require_cuda
def _A ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch.nn.Linear(10 , 10 )
lowerCAmelCase__ : Optional[Any] = torch.optim.SGD(model.parameters() , lr=0.01 )
lowerCAmelCase__ : List[Any] = Accelerator(cpu=a__ )
lowerCAmelCase__ : int = accelerator.prepare(a__ )
| 568
| 0
|
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
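
# Added note (not part of the original file, and the usage below is only an
# illustration): a dummy object like the one above exists so that importing the
# package without the `onnx` backend still succeeds, while any actual use fails
# fast with an informative error:
#
#   model = OnnxRuntimeModel()   # raises ImportError telling you to install onnx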
| 253
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
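
# Added note (not part of the original __init__, import path shown only as an
# illustration): the `_LazyModule` registered above defers the heavy torch/TF
# imports until an attribute is first touched:
#
#   from transformers.models import rag   # cheap: only this stub executes
#   rag.RagModel                           # first access triggers the real submodule import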
| 253
| 1
|
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 621
|
"""simple docstring"""
import random
def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
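
# Added sketch (not in the original): a deterministic sanity check of the sorter
# above; seeding the RNG makes the random pivot choices reproducible. Call
# _demo() from a REPL; the interactive main() above is left untouched.
def _demo() -> None:
    random.seed(0)
    data = [5, 3, 8, 1, 9, 2]
    quick_sort_random(data, 0, len(data))
    assert data == [1, 2, 3, 5, 8, 9], data
    print("sorted:", data)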
| 621
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 607
|
"""simple docstring"""
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
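
# Added sketch (not in the original): the full half-adder truth table. Output
# bit 0 carries the XOR (sum) and output bit 1 the AND (carry), so e.g.
# half_adder(1, 1) should measure '10' on every shot.
def _truth_table() -> None:
    for bit0 in (0, 1):
        for bit1 in (0, 1):
            print(bit0, bit1, half_adder(bit0, bit1))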
| 607
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
_lowerCAmelCase : int = logging.get_logger(__name__)
_lowerCAmelCase : int = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
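
# Added usage sketch (not part of the original file). Everything referenced here
# is defined above, so this should behave the same wherever this module imports:
#
#   config = DebertaV2Config()            # library defaults: hidden_size=1536, 24 layers
#   onnx_config = DebertaV2OnnxConfig(config, task="sequence-classification")
#   print(onnx_config.inputs)             # no "token_type_ids": type_vocab_size defaults to 0
#   print(onnx_config.default_onnx_opset) # 12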
| 721
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_lowerCAmelCase : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_lowerCAmelCase : Optional[Any] = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_lowerCAmelCase : List[Any] = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[
'https://github.com/m-popovic/chrF',
] , )
    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 646
| 0
|
"""simple docstring"""
from math import ceil
def solution(n: int = 1001) -> int:
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number")
| 482
|
import cv2 as cva
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k is an empirically determined constant, usually in [0.04, 0.06]
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        img = cva.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cva.imwrite("detect.png", color_img)
| 345
| 0
|
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list,
    output_size: tuple,
    scale_range: tuple,
    filter_scale: float = 0.0,
) -> tuple:
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[
                divid_point_y : output_size[0], divid_point_x : output_size[1], :
            ] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]


def random_chars(number_char: int) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
| 510
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
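
# Added usage sketch (not part of the original file). The checkpoint name below
# is only an illustration — substitute whichever BridgeTower checkpoint you
# actually use:
#
#   from PIL import Image
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   encoding = processor(images=Image.open("cat.png"), text="a photo of a cat", return_tensors="pt")
#   print(encoding.keys())   # input_ids, attention_mask, pixel_values, pixel_mask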
| 510
| 1
|
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
_lowerCAmelCase = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 569
|
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
_lowerCAmelCase = """sshleifer/bart-tiny-random"""
_lowerCAmelCase = """patrickvonplaten/t5-tiny-random"""
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self ):
return AutoConfig.from_pretrained(a__ )
def _lowerCamelCase ( self ):
A_ , *A_ : Optional[Any] = create_student_by_copying_alternating_layers(a__ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _lowerCamelCase ( self ):
A_ , *A_ : Tuple = create_student_by_copying_alternating_layers(a__ , tempfile.mkdtemp() , e=1 , d=a__ )
def _lowerCamelCase ( self ):
A_ , *A_ : int = create_student_by_copying_alternating_layers(a__ , tempfile.mkdtemp() , e=1 , d=a__ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _lowerCamelCase ( self ):
A_ , *A_ : Tuple = create_student_by_copying_alternating_layers(a__ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _lowerCamelCase ( self ):
with self.assertRaises(a__ ):
create_student_by_copying_alternating_layers(a__ , tempfile.mkdtemp() , e=a__ , d=a__ )
| 569
| 1
|
from math import ceil, sqrt
def solution(limit: int = 1000000) -> int:
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
if __name__ == "__main__":
print(F'{solution() = }')
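
# Added cross-check (not in the original): count the laminae directly for a
# small tile budget and compare with the closed-form loop above. Hand-counting
# gives 41 laminae that use at most 100 tiles.
def _brute_force(limit: int) -> int:
    count = 0
    for outer in range(3, limit):
        for hole in range(outer - 2, 0, -2):  # same parity as outer, at least 1 wide
            if outer * outer - hole * hole > limit:
                break
            count += 1
    return count


def _self_test() -> None:
    assert solution(100) == _brute_force(100) == 41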
| 443
|
def perfect_cube(n: int) -> bool:
    val = n ** (1 / 3)
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
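
# Added variant (not in the original): the float cube root above is inexact —
# on many platforms 27 ** (1 / 3) is 3.0000000000000004, so even perfect_cube(27)
# can report False. Rounding the root and re-checking with exact integer
# arithmetic is robust for arbitrarily large inputs.
def perfect_cube_exact(n: int) -> bool:
    n = abs(n)
    root = round(n ** (1 / 3))
    # the rounded root can be off by one, so probe its neighbours exactly
    return any((root + d) ** 3 == n for d in (-1, 0, 1))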
| 443
| 1
|
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
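
# Added demo (not in the original): the 3-vertex example from the comment block
# above, built programmatically instead of via input(). Vertex indices are
# 0-based, so the two edges become graph[1][2] = 2 and graph[2][1] = 1.
def _demo() -> None:
    inf = float("inf")
    graph = [[0.0, inf, inf], [inf, 0.0, 2.0], [inf, 1.0, 0.0]]
    dist, _ = floyd_warshall(graph, 3)
    assert dist[1][2] == 2.0 and dist[2][1] == 1.0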
| 253
|
def kth_permutation(k, n):
    """
    Finds k'th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n-1 in O(n^2) time.

    >>> kth_permutation(5, 4)
    [0, 3, 2, 1]
    """
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])

    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
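
# Added cross-check (not in the original): the factorial-number-system
# construction above must agree with plain lexicographic enumeration.
def _check(n: int = 4) -> None:
    from itertools import permutations
    from math import factorial

    ordered = sorted(permutations(range(n)))
    for k in range(factorial(n)):
        assert kth_permutation(k, n) == list(ordered[k]), k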
| 253
| 1
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
| 218
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__UpperCAmelCase = """platform"""
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0,
        )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
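
# A minimal usage sketch for the generation API exercised above (an added
# example, not part of the original test file; the prompt and generation
# settings are illustrative, and output handling may differ across
# transformers versions).
if __name__ == "__main__":
    sketch_tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
    sketch_model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
    sketch_inputs = sketch_tokenizer(["Hello, how are you?"], return_tensors="jax")
    # Greedy decoding; the returned object carries the token ids in `.sequences`.
    sketch_output = sketch_model.generate(**sketch_inputs, num_beams=1, max_length=25)
    print(sketch_tokenizer.batch_decode(sketch_output.sequences, skip_special_tokens=True))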
| 218
| 1
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
    from transformers import (
        TFBertModel,
        TFCLIPVisionModel,
        TFDeiTModel,
        TFRobertaModel,
        TFVisionTextDualEncoderModel,
        TFViTModel,
        VisionTextDualEncoderConfig,
    )

if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    """Return ``x`` unchanged if it is already iterable, else duplicate it into a pair."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def check_model_from_pretrained_configs(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_model(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_vision_text_dual_encoder_from_pretrained(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
    def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def test_vision_text_dual_encoder_model(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs)

    def test_model_from_pretrained_configs(self):
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)

    def test_save_load(self):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)

    def test_vision_text_output_attention(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)
    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs
    def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
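
# A minimal usage sketch mirroring the integration test above (an added
# example, not part of the original test file; the checkpoint and Italian
# captions come from the test, the softmax step is illustrative).
if __name__ == "__main__":
    import tensorflow as tf

    sketch_model = TFVisionTextDualEncoderModel.from_pretrained(
        "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
    )
    sketch_processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
    sketch_image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    sketch_inputs = sketch_processor(
        text=["una foto di un gatto", "una foto di un cane"], images=sketch_image, padding=True, return_tensors="np"
    )
    sketch_outputs = sketch_model(**sketch_inputs)
    # Per-image probabilities over the candidate captions.
    print(tf.nn.softmax(sketch_outputs.logits_per_image, axis=-1).numpy())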
| 547
|
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Return True if ``pattern`` occurs in ``text`` (Knuth-Morris-Pratt)."""
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """For each prefix, the length of the longest proper prefix that is also a suffix."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
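
    # Test 6) an added worst-case demo (not in the original test set): naive
    # matching is O(n * m) on inputs like this, while KMP stays linear because
    # the failure array resumes the scan without re-reading matched text.
    pattern = "aaab"
    text = "a" * 20 + "b"
    assert kmp(pattern, text)
    assert get_failure_array(pattern) == [0, 1, 2, 0]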
| 547
| 1
|
'''simple docstring'''
def kth_permutation(k: int, n: int) -> list[int]:
    """Return the k-th (0-indexed) lexicographic permutation of range(n).

    >>> kth_permutation(0, 4)
    [0, 1, 2, 3]
    >>> kth_permutation(5, 4)
    [0, 3, 2, 1]
    """
    # Factorials for the Lehmer code: [1!, 2!, ..., (n - 1)!]
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation
if __name__ == "__main__":
    import doctest

    doctest.testmod()
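
# Added sanity check (not part of the original module): the Lehmer-code
# construction above must agree with enumerating all permutations of range(4)
# in lexicographic order via itertools.
if __name__ == "__main__":
    from itertools import permutations

    for k, perm in enumerate(permutations(range(4))):
        assert tuple(kth_permutation(k, 4)) == perm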
| 223
|
'''simple docstring'''
import argparse

from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global]."
        )
    # NOTE: the left-hand Flax parameter paths below follow the standard
    # Hugging Face Flax T5/LongT5 module layout; verify them against your
    # transformers version before relying on the converted weights.
    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value

        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"]["weight"] = t5x_global_layer_norm

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"][
            "embedding"
        ] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm
    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value

        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value

        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
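
# Example invocation (hedged: the config name and paths are placeholders, not
# taken from the script itself):
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path ./long-t5-local-base-flax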
| 223
| 1
|