| code (string, 86–54.5k chars) | code_codestyle (int64, 0–371) | style_context (string, 87–49.2k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
UpperCAmelCase = 0
UpperCAmelCase = len(lowercase_ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
UpperCAmelCase = i + 1
else:
UpperCAmelCase = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
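A quick sanity check of the repaired function (an illustration added here, not part of the original row); note that the two-pointer scan is only correct when the input list is sorted in ascending order:

```python
# Illustrative checks only: two_pointer assumes nums is sorted ascending.
assert two_pointer([2, 7, 11, 15], 9) == [0, 1]
assert two_pointer([1, 3, 5, 8], 13) == [2, 3]
assert two_pointer([1, 2, 3], 100) == []  # no pair reaches the target
```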
| 78
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = ["""image_processor""", """tokenizer"""]
__UpperCamelCase = """LayoutLMv2ImageProcessor"""
__UpperCamelCase = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
def __init__( self :Any , lowercase_ :int=None , lowercase_ :Union[str, Any]=None , **lowercase_ :Optional[Any] ) -> Dict:
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowercase_ , )
UpperCAmelCase = kwargs.pop('feature_extractor' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowercase_ , lowercase_ )
def __call__( self :str , lowercase_ :Optional[int] , lowercase_ :Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase_ :Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowercase_ :Union[List[List[int]], List[List[List[int]]]] = None , lowercase_ :Optional[Union[List[int], List[List[int]]]] = None , lowercase_ :bool = True , lowercase_ :Union[bool, str, PaddingStrategy] = False , lowercase_ :Union[bool, str, TruncationStrategy] = None , lowercase_ :Optional[int] = None , lowercase_ :int = 0 , lowercase_ :Optional[int] = None , lowercase_ :Optional[bool] = None , lowercase_ :Optional[bool] = None , lowercase_ :bool = False , lowercase_ :bool = False , lowercase_ :bool = False , lowercase_ :bool = False , lowercase_ :bool = True , lowercase_ :Optional[Union[str, TensorType]] = None , **lowercase_ :Any , ) -> BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
# first, apply the image processor
UpperCAmelCase = self.image_processor(images=lowercase_ , return_tensors=lowercase_ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCAmelCase = features['words']
UpperCAmelCase = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
# add pixel values
UpperCAmelCase = features.pop('pixel_values' )
if return_overflowing_tokens is True:
UpperCAmelCase = self.get_overflowing_images(lowercase_ , encoded_inputs['overflow_to_sample_mapping'] )
UpperCAmelCase = images
return encoded_inputs
def UpperCAmelCase__ ( self :Dict , lowercase_ :List[Any] , lowercase_ :Any ) -> Optional[Any]:
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
UpperCAmelCase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
f""" {len(lowercase_ )} and {len(lowercase_ )}""" )
return images_with_overflow
def UpperCAmelCase__ ( self :Any , *lowercase_ :int , **lowercase_ :Tuple ) -> Tuple:
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def UpperCAmelCase__ ( self :Any , *lowercase_ :List[Any] , **lowercase_ :Optional[int] ) -> Optional[Any]:
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
@property
def UpperCAmelCase__ ( self :int ) -> Optional[int]:
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def UpperCAmelCase__ ( self :int ) -> Dict:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowercase_ , )
return self.image_processor_class
@property
def UpperCAmelCase__ ( self :Union[str, Any] ) -> Optional[int]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowercase_ , )
return self.image_processor
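For context, a minimal usage sketch of the restored processor (my illustration, not part of the row; it assumes the public `microsoft/layoutxlm-base` checkpoint, a local `document.png`, and Tesseract installed for the default `apply_ocr=True` path):

```python
from PIL import Image
from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
image = Image.open("document.png").convert("RGB")  # hypothetical input file
encoding = processor(image, return_tensors="pt")
print(encoding.keys())  # typically input_ids, attention_mask, bbox, image
```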
| 78
| 1
|
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile

from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version


logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )

        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )

        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return the full path if it's a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
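To make the control flow concrete, here is how `download_and_extract` remaps a URL dict onto dummy-file paths (a hypothetical call I am adding for illustration; it assumes a `MockDownloadManager` instance `dl_manager` whose dummy data has already been fetched):

```python
# Hypothetical illustration: each remote URL is remapped to a path inside the
# extracted dummy_data folder, keyed by its last path component (quote_plus-encoded).
urls = {"train": "https://example.com/data/train.csv", "test": "https://example.com/data/test.csv"}
paths = dl_manager.download_and_extract(urls)
# -> {"train": "<dummy_file>/train.csv", "test": "<dummy_file>/test.csv"}
```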
| 362
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str):
    """Save git information to folder_path/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
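A small worked example of the metric helpers (mine, not from the file), showing how normalization feeds into the exact-match and F1 scores:

```python
# Illustrative only: exercises normalize_answer / exact_match_score / f1_score.
assert normalize_answer("The Cat sat.") == "cat sat"   # lowercased, article and punctuation stripped
assert exact_match_score("The Cat sat.", "cat sat")
# "cat sat" vs "cat slept": one shared token out of two on each side -> F1 = 0.5.
assert abs(f1_score("the cat sat", "the cat slept") - 0.5) < 1e-9
```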
| 94
| 0
|
from collections import deque
from math import floor
from random import random
from time import time

# the default weight is 1 if not assigned but all the implementation is weighted


class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; pass -1 (or nothing) for a random
    # count between 10 and 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin


class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        # add the u -> v edge
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; pass -1 (or nothing) for a random
    # count between 10 and 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
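A brief usage sketch of the restored classes (my own illustration; the traversal order shown assumes the insertion order above, since the start node defaults to the first key inserted):

```python
# Illustrative only: a small directed graph exercising the main methods.
g = DirectedGraph()
g.add_pair(0, 1)
g.add_pair(1, 2)
g.add_pair(2, 0)  # introduces the cycle 0 -> 1 -> 2 -> 0
print(g.all_nodes())                     # [0, 1, 2]
print(g.dfs())                           # [0, 1, 2]
print(g.bfs())                           # [0, 1, 2]
print(g.in_degree(0), g.out_degree(0))   # 1 1
print(g.has_cycle())                     # True
```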
| 145
|
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CPMAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
| 24
| 0
|
import argparse
from copy import deepcopy

import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load

from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    set_seed,
)


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()


metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)


class CustomCallback(TrainerCallback):
    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        # also evaluate on the train split at the end of each epoch
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy


def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()


if __name__ == "__main__":
    main()
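A quick check of `compute_metrics` in isolation (my illustration, not part of the script; `evaluate.load("accuracy")` downloads the metric on first use):

```python
import numpy as np

# Illustrative only: logits for 3 examples over 7 classes, argmax = [0, 1, 2].
logits = np.eye(7)[[0, 1, 2]]
labels = np.array([0, 1, 3])
print(compute_metrics((logits, labels)))  # {'accuracy': 0.666...}
```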
| 364
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def UpperCAmelCase__ ( lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Any ) -> Dict:
'''simple docstring'''
lowercase = s.rsplit(lowerCAmelCase__ , lowerCAmelCase__ )
return new.join(lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> List[Any]:
'''simple docstring'''
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowercase = {}
lowercase = ["""group_1""", """group_2""", """group_3""", """group_4"""]
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
lowercase = key.replace(f'{group_key}.' , f'{group_key}.group.' )
if "res_path" in key:
lowercase = key.replace("""res_path.""" , """res_path.path.""" )
if key.endswith(""".w""" ):
lowercase = rreplace(lowerCAmelCase__ , """.w""" , """.weight""" , 1 )
if key.endswith(""".b""" ):
lowercase = rreplace(lowerCAmelCase__ , """.b""" , """.bias""" , 1 )
lowercase = value.float()
return upgrade
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any]=None , lowerCAmelCase__ :Any=True ) -> Any:
'''simple docstring'''
from dall_e import Encoder
lowercase = Encoder()
if os.path.exists(lowerCAmelCase__ ):
lowercase = torch.load(lowerCAmelCase__ )
else:
lowercase = torch.hub.load_state_dict_from_url(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = ckpt.state_dict()
encoder.load_state_dict(lowerCAmelCase__ )
if config_path is not None:
lowercase = FlavaImageCodebookConfig.from_pretrained(lowerCAmelCase__ )
else:
lowercase = FlavaImageCodebookConfig()
lowercase = FlavaImageCodebook(lowerCAmelCase__ ).eval()
lowercase = encoder.state_dict()
lowercase = upgrade_state_dict(lowerCAmelCase__ )
hf_model.load_state_dict(lowerCAmelCase__ )
lowercase = hf_model.state_dict()
lowercase = count_parameters(lowerCAmelCase__ )
lowercase = count_parameters(lowerCAmelCase__ )
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(lowerCAmelCase__ )
else:
return hf_state_dict
if __name__ == "__main__":
__lowerCAmelCase : Tuple =argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
__lowerCAmelCase : Any =parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
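The key-renaming helper is easiest to see on an example (mine, not from the file); `rreplace` rewrites only the last `occurrence` matches, counting from the right:

```python
# Illustrative only: exercises rreplace as used by upgrade_state_dict.
assert rreplace("blocks.0.w", ".w", ".weight", 1) == "blocks.0.weight"
assert rreplace("a.b.a.b", ".b", ".bias", 1) == "a.b.a.bias"  # only the rightmost ".b" changes
```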
| 32
| 0
|
import argparse
import os
import re


PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
#   SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname: str, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                    line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
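What the identifier regex extracts as the sort key (an illustration I am adding; the mapping line below is a made-up but typical entry):

```python
# Illustrative only: the sort key for a one-line mapping entry is its first quoted name.
line = '        ("albert", "AlbertConfig"),'
print(_re_identifier.search(line).groups()[0])  # albert
```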
| 192
|
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
| 192
| 1
|
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedy change-making: repeatedly pick the largest denomination that fits."""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array

    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
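A worked example of the greedy routine (mine, not from the file):

```python
# Illustrative only: making change for 987 with Indian denominations.
assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987") == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2,
]
```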
| 231
|
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a single input sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
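A minimal `FilterType` implementation to drive the plots (my sketch; under the `Protocol`, any object with a matching `process` method qualifies, no inheritance needed):

```python
class IdentityFilter:
    """Pass-through filter: flat 0 dB magnitude and zero phase response."""

    def process(self, sample: float) -> float:
        return sample


if __name__ == "__main__":
    show_frequency_response(IdentityFilter(), 48000)
    show_phase_response(IdentityFilter(), 48000)
```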
| 231
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
def a_ ( __lowercase : Dict ) -> Any:
if isinstance(UpperCamelCase__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(UpperCamelCase__ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(UpperCamelCase__ ):
return [[videos]]
raise ValueError(f'''Could not make batched video from {videos}''' )
class SCREAMING_SNAKE_CASE__ ( __A ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = ['pixel_values']
def __init__( self : Optional[int] , lowercase : Any = True , lowercase : Dict = None , lowercase : int = PILImageResampling.BILINEAR , lowercase : str = True , lowercase : Optional[int] = None , lowercase : Tuple = True , lowercase : int = 1 / 255 , lowercase : Optional[int] = True , lowercase : Optional[int] = True , lowercase : List[Any] = None , lowercase : str = None , **lowercase : Tuple , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
_snake_case = size if size is not None else {'shortest_edge': 256}
_snake_case = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
_snake_case = crop_size if crop_size is not None else {'height': 224, 'width': 224}
_snake_case = get_size_dict(__UpperCAmelCase , param_name='crop_size' )
_snake_case = do_resize
_snake_case = size
_snake_case = do_center_crop
_snake_case = crop_size
_snake_case = resample
_snake_case = do_rescale
_snake_case = rescale_factor
_snake_case = offset
_snake_case = do_normalize
_snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A ( self : str , lowercase : List[Any] , lowercase : int , lowercase : Dict = PILImageResampling.BILINEAR , lowercase : Optional[Any] = None , **lowercase : Optional[Any] , ):
'''simple docstring'''
_snake_case = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
if "shortest_edge" in size:
_snake_case = get_resize_output_image_size(__UpperCAmelCase , size['shortest_edge'] , default_to_square=__UpperCAmelCase )
elif "height" in size and "width" in size:
_snake_case = (size['height'], size['width'])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def A ( self : Optional[Any] , lowercase : Dict , lowercase : Optional[int] , lowercase : Tuple = None , **lowercase : Dict , ):
'''simple docstring'''
_snake_case = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__UpperCAmelCase , size=(size['height'], size['width']) , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def A ( self : Union[str, Any] , lowercase : str , lowercase : str , lowercase : Any = True , lowercase : List[Any] = None , **lowercase : Optional[Any] , ):
'''simple docstring'''
_snake_case = image.astype(np.floataa )
if offset:
_snake_case = image - (scale / 2)
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def A ( self : List[Any] , lowercase : str , lowercase : str , lowercase : str , lowercase : str = None , **lowercase : Optional[int] , ):
'''simple docstring'''
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def A ( self : Dict , lowercase : int , lowercase : str = None , lowercase : Optional[Any] = None , lowercase : int = None , lowercase : str = None , lowercase : Dict = None , lowercase : Optional[int] = None , lowercase : Any = None , lowercase : str = None , lowercase : Optional[Any] = None , lowercase : List[str] = None , lowercase : Dict = None , lowercase : List[str] = ChannelDimension.FIRST , ):
'''simple docstring'''
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
if offset and not do_rescale:
raise ValueError('For offset, do_rescale must also be set to True.' )
# All transformations expect numpy arrays.
_snake_case = to_numpy_array(__UpperCAmelCase )
if do_resize:
_snake_case = self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase )
if do_center_crop:
_snake_case = self.center_crop(__UpperCAmelCase , size=__UpperCAmelCase )
if do_rescale:
_snake_case = self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase , offset=__UpperCAmelCase )
if do_normalize:
_snake_case = self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase )
_snake_case = to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase )
return image
def A ( self : List[Any] , lowercase : Tuple , lowercase : str = None , lowercase : List[Any] = None , lowercase : List[str] = None , lowercase : Optional[int] = None , lowercase : Optional[Any] = None , lowercase : Union[str, Any] = None , lowercase : Union[str, Any] = None , lowercase : Tuple = None , lowercase : str = None , lowercase : List[Any] = None , lowercase : Optional[Any] = None , lowercase : Optional[int] = None , lowercase : Union[str, Any] = ChannelDimension.FIRST , **lowercase : Tuple , ):
'''simple docstring'''
_snake_case = do_resize if do_resize is not None else self.do_resize
_snake_case = resample if resample is not None else self.resample
_snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop
_snake_case = do_rescale if do_rescale is not None else self.do_rescale
_snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
_snake_case = offset if offset is not None else self.offset
_snake_case = do_normalize if do_normalize is not None else self.do_normalize
_snake_case = image_mean if image_mean is not None else self.image_mean
_snake_case = image_std if image_std is not None else self.image_std
_snake_case = size if size is not None else self.size
_snake_case = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
_snake_case = crop_size if crop_size is not None else self.crop_size
_snake_case = get_size_dict(__UpperCAmelCase , param_name='crop_size' )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
_snake_case = make_batched(__UpperCAmelCase )
_snake_case = [
[
self._preprocess_image(
image=__UpperCAmelCase , do_resize=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , do_center_crop=__UpperCAmelCase , crop_size=__UpperCAmelCase , do_rescale=__UpperCAmelCase , rescale_factor=__UpperCAmelCase , offset=__UpperCAmelCase , do_normalize=__UpperCAmelCase , image_mean=__UpperCAmelCase , image_std=__UpperCAmelCase , data_format=__UpperCAmelCase , )
for img in video
]
for video in videos
]
_snake_case = {'pixel_values': videos}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
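# Quick illustration (an assumption, not in the original file) of how the
# make_batched helper above normalises its input into a list of videos, each a
# list of frames.
if __name__ == "__main__":
    frame = np.zeros((224, 224, 3), dtype=np.uint8)   # one H x W x C frame
    print(len(make_batched(frame)))                   # 1 video
    print(len(make_batched([frame, frame])))          # 1 video with 2 frames
    print(len(make_batched([[frame], [frame]])))      # 2 videos of 1 frame each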
| 282
|
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
__lowerCamelCase = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
__lowerCamelCase = "main"
# Default branch name
__lowerCamelCase = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
__lowerCamelCase = "aaaaaaa"
# This commit does not exist, so we should 404.
__lowerCamelCase = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
__lowerCamelCase = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def context_en():
    """simple docstring"""
    print('Welcome!' )
    yield
    print('Bye!' )
@contextlib.contextmanager
def context_fr():
    """simple docstring"""
    print('Bonjour!' )
    yield
    print('Au revoir!' )
class UpperCamelCase__( unittest.TestCase ):
def snake_case__ ( self ) -> List[str]:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec('transformers' ) is not None
class UpperCamelCase__( unittest.TestCase ):
    @unittest.mock.patch('sys.stdout' ,new_callable=io.StringIO )
    def snake_case__ ( self ,mock_stdout ) -> Dict:
        with ContextManagers([] ):
            print('Transformers are awesome!' )
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue() ,'Transformers are awesome!\n' )
    @unittest.mock.patch('sys.stdout' ,new_callable=io.StringIO )
    def snake_case__ ( self ,mock_stdout ) -> List[str]:
        with ContextManagers([context_en()] ):
            print('Transformers are awesome!' )
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue() ,'Welcome!\nTransformers are awesome!\nBye!\n' )
    @unittest.mock.patch('sys.stdout' ,new_callable=io.StringIO )
    def snake_case__ ( self ,mock_stdout ) -> Any:
        with ContextManagers([context_fr(), context_en()] ):
            print('Transformers are awesome!' )
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue() ,'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n' )
    @require_torch
    def snake_case__ ( self ) -> Union[str, Any]:
        self.assertEqual(find_labels(BertForSequenceClassification ) ,['labels'] )
        self.assertEqual(find_labels(BertForPreTraining ) ,['labels', 'next_sentence_label'] )
        self.assertEqual(find_labels(BertForQuestionAnswering ) ,['start_positions', 'end_positions'] )
        class DummyModel( BertForSequenceClassification ):
            pass
        self.assertEqual(find_labels(DummyModel ) ,['labels'] )
    @require_tf
    def snake_case__ ( self ) -> str:
        self.assertEqual(find_labels(TFBertForSequenceClassification ) ,['labels'] )
        self.assertEqual(find_labels(TFBertForPreTraining ) ,['labels', 'next_sentence_label'] )
        self.assertEqual(find_labels(TFBertForQuestionAnswering ) ,['start_positions', 'end_positions'] )
        class TFDummyModel( TFBertForSequenceClassification ):
            pass
        self.assertEqual(find_labels(TFDummyModel ) ,['labels'] )
    @require_flax
    def snake_case__ ( self ) -> List[Any]:
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification ) ,[] )
        self.assertEqual(find_labels(FlaxBertForPreTraining ) ,[] )
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering ) ,[] )
        class FlaxDummyModel( FlaxBertForSequenceClassification ):
            pass
        self.assertEqual(find_labels(FlaxDummyModel ) ,[] )
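# A short sketch (an assumption, not part of the original test file) of what
# ContextManagers does with the stacked contexts tested above: it enters each
# context in order and exits them in reverse, like nesting `with` blocks.
if __name__ == "__main__":
    with ContextManagers([context_fr(), context_en()] ):
        print('Transformers are awesome!' )
    # prints: Bonjour! / Welcome! / Transformers are awesome! / Bye! / Au revoir!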
| 221
| 0
|
def is_palindrome (head ) -> bool:
    """simple docstring"""
    if not head:
        return True
    # split the list to two parts
    fast , slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack (head ) -> bool:
    """simple docstring"""
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast , slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict (head ) -> bool:
    """simple docstring"""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 ,len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
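# Minimal harness (an assumption; the original snippet omits the node class):
# all three checks above expect singly-linked nodes exposing .val and .next.
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None

def build_linked_list(values):
    head = None
    tail = None
    for v in values:
        node = ListNode(v)
        if head is None:
            head = node
        else:
            tail.next = node
        tail = node
    return head

# e.g. is_palindrome_stack(build_linked_list([1, 2, 2, 1])) -> True
#      is_palindrome_dict(build_linked_list([1, 2, 3]))     -> False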
| 368
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
class A__ ( BeitImageProcessor ):
"""simple docstring"""
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            '''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use BeitImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 213
| 0
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def set_recursively ( hf_pointer , key , value , full_name , weight_type ) -> None:
    """simple docstring"""
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            F""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights ( fairseq_model , hf_model ) -> None:
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F"""Unused weights: {unused_weights}""" )
def load_conv_layer ( full_name , value , feature_extractor , unused_weights , use_group_norm ) -> None:
    """simple docstring"""
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowerCamelCase__: List[Any] =value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowerCamelCase__: Any =value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
lowerCamelCase__: Optional[int] =value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
lowerCamelCase__: str =value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_sat_checkpoint ( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ) -> None:
    """simple docstring"""
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path )
    else:
        config = UniSpeechSatConfig()
    dict_path = ""
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config )
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config )
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
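# Standalone sketch (an assumption, not in the original script) of the "*"
# substitution used with MAPPING above: the layer index is parsed out of the
# fairseq parameter name and spliced into the HF key template.
def demo_map(name, key, mapped_key):
    layer_index = name.split(key)[0].split(".")[-2]
    return mapped_key.replace("*", layer_index)

print(demo_map(
    "encoder.layers.7.self_attn.k_proj.weight",
    "self_attn.k_proj",
    "encoder.layers.*.attention.k_proj",
))  # -> encoder.layers.7.attention.k_proj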
| 10
|
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup ( params , i , prefix , layer_name="attention" ):
    """simple docstring"""
    k = params[F'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
    o = params[F'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
    q = params[F'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
    v = params[F'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
    return k, o, q, v
def tax_mlp_lookup ( params , i , prefix , split_mlp_wi=False ):
    """simple docstring"""
    if split_mlp_wi:
        wi_0 = params[F'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
        wi_1 = params[F'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
        wi = (wi_0, wi_1)
    else:
        wi = params[F'''{prefix}/layers_{i}/mlp/wi/kernel''']
    wo = params[F'''{prefix}/layers_{i}/mlp/wo/kernel''']
    return wi, wo
def tax_layer_norm_lookup ( params , i , prefix , layer_name ):
    """simple docstring"""
    return params[F'''{prefix}/layers_{i}/{layer_name}/scale''']
def convert_tax_to_pytorch ( variables , *, num_layers , is_encoder_only ):
    """simple docstring"""
    old = traverse_util.flatten_dict(variables['''target'''] )
    old = {'''/'''.join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = '''encoder/layers_0/mlp/wi_0/kernel''' in old
    print('''Split MLP:''' , split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new['''shared.weight'''] = old['''token_embedder/embedding''']
    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old , i , '''encoder''' , '''pre_attention_layer_norm''' )
        k , o , q , v = tax_attention_lookup(old , i , '''encoder''' , '''attention''' )
        new[F'''encoder.block.{i}.layer.0.layer_norm.weight'''] = layer_norm
        new[F'''encoder.block.{i}.layer.0.SelfAttention.k.weight'''] = k.T
        new[F'''encoder.block.{i}.layer.0.SelfAttention.o.weight'''] = o.T
        new[F'''encoder.block.{i}.layer.0.SelfAttention.q.weight'''] = q.T
        new[F'''encoder.block.{i}.layer.0.SelfAttention.v.weight'''] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old , i , '''encoder''' , '''pre_mlp_layer_norm''' )
        wi , wo = tax_mlp_lookup(old , i , '''encoder''' , split_mlp_wi )
        new[F'''encoder.block.{i}.layer.1.layer_norm.weight'''] = layer_norm
        if split_mlp_wi:
            new[F'''encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'''] = wi[0].T
            new[F'''encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'''] = wi[1].T
        else:
            new[F'''encoder.block.{i}.layer.1.DenseReluDense.wi.weight'''] = wi.T
        new[F'''encoder.block.{i}.layer.1.DenseReluDense.wo.weight'''] = wo.T
    new['''encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'''] = old[
        '''encoder/relpos_bias/rel_embedding'''
    ].T
    new['''encoder.final_layer_norm.weight'''] = old['''encoder/encoder_norm/scale''']
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old , i , '''decoder''' , '''pre_self_attention_layer_norm''' )
            k , o , q , v = tax_attention_lookup(old , i , '''decoder''' , '''self_attention''' )
            new[F'''decoder.block.{i}.layer.0.layer_norm.weight'''] = layer_norm
            new[F'''decoder.block.{i}.layer.0.SelfAttention.k.weight'''] = k.T
            new[F'''decoder.block.{i}.layer.0.SelfAttention.o.weight'''] = o.T
            new[F'''decoder.block.{i}.layer.0.SelfAttention.q.weight'''] = q.T
            new[F'''decoder.block.{i}.layer.0.SelfAttention.v.weight'''] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old , i , '''decoder''' , '''pre_cross_attention_layer_norm''' )
            k , o , q , v = tax_attention_lookup(old , i , '''decoder''' , '''encoder_decoder_attention''' )
            new[F'''decoder.block.{i}.layer.1.layer_norm.weight'''] = layer_norm
            new[F'''decoder.block.{i}.layer.1.EncDecAttention.k.weight'''] = k.T
            new[F'''decoder.block.{i}.layer.1.EncDecAttention.o.weight'''] = o.T
            new[F'''decoder.block.{i}.layer.1.EncDecAttention.q.weight'''] = q.T
            new[F'''decoder.block.{i}.layer.1.EncDecAttention.v.weight'''] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old , i , '''decoder''' , '''pre_mlp_layer_norm''' )
            wi , wo = tax_mlp_lookup(old , i , '''decoder''' , split_mlp_wi )
            new[F'''decoder.block.{i}.layer.2.layer_norm.weight'''] = layer_norm
            if split_mlp_wi:
                new[F'''decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'''] = wi[0].T
                new[F'''decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'''] = wi[1].T
            else:
                new[F'''decoder.block.{i}.layer.2.DenseReluDense.wi.weight'''] = wi.T
            new[F'''decoder.block.{i}.layer.2.DenseReluDense.wo.weight'''] = wo.T
        new['''decoder.final_layer_norm.weight'''] = old['''decoder/decoder_norm/scale''']
        new['''decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'''] = old[
            '''decoder/relpos_bias/rel_embedding'''
        ].T
    # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
    if "decoder/logits_dense/kernel" in old:
        new['''lm_head.weight'''] = old['''decoder/logits_dense/kernel'''].T
    return new
def make_state_dict ( converted_params , is_encoder_only ):
    """simple docstring"""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict['''encoder.embed_tokens.weight'''] = state_dict['''shared.weight''']
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict['''decoder.embed_tokens.weight'''] = state_dict['''shared.weight''']
        if "lm_head.weight" not in state_dict: # For old 1.0 models.
            print('''Using shared word embeddings as lm_head.''' )
            state_dict['''lm_head.weight'''] = state_dict['''shared.weight''']
return state_dict
def load_tax_weights_in_ta ( model , config , tax_checkpoint_path , is_encoder_only ):
    """simple docstring"""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=is_encoder_only )
def convert_tax_checkpoint_to_pytorch ( tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only = False ):
    """simple docstring"""
    config = TaConfig.from_json_file(config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config )
    else:
        model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print('''Done''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
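# Small illustration (an assumption, not part of the original script): the
# repeated `.T` in the conversion above reflects the layout difference between
# T5X kernels, stored as (in_features, out_features), and PyTorch nn.Linear
# weights, stored as (out_features, in_features).
import numpy as np

tax_kernel = np.zeros((512, 2048))   # T5X layout: (d_model, d_ff)
print(tax_kernel.T.shape)            # PyTorch layout: (2048, 512)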
| 94
| 0
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
def rename_keys ( state_dict , encoder_only=False )-> OrderedDict:
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head" ):
            key = "segformer.encoder." + key
        if key.startswith("backbone" ):
            key = key.replace("backbone" , "segformer.encoder" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed" ) + len("patch_embed" )]
            key = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(idx )-1}''' )
        if "norm" in key:
            key = key.replace("norm" , "layer_norm" )
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )]
            key = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(idx )-1}''' )
        if "layer_norm1" in key:
            key = key.replace("layer_norm1" , "layer_norm_1" )
        if "layer_norm2" in key:
            key = key.replace("layer_norm2" , "layer_norm_2" )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block" ) + len("block" )]
            key = key.replace(F'''block{idx}''' , F'''block.{int(idx )-1}''' )
        if "attn.q" in key:
            key = key.replace("attn.q" , "attention.self.query" )
        if "attn.proj" in key:
            key = key.replace("attn.proj" , "attention.output.dense" )
        if "attn" in key:
            key = key.replace("attn" , "attention.self" )
        if "fc1" in key:
            key = key.replace("fc1" , "dense1" )
        if "fc2" in key:
            key = key.replace("fc2" , "dense2" )
        if "linear_pred" in key:
            key = key.replace("linear_pred" , "classifier" )
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv" , "linear_fuse" )
            key = key.replace("linear_fuse.bn" , "batch_norm" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c" ) + len("linear_c" )]
            key = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(idx )-1}''' )
        if key.startswith("head" ):
            key = key.replace("head" , "classifier" )
        new_state_dict[key] = value
return new_state_dict
def read_in_k_v ( state_dict , config )-> None:
    '''simple docstring'''
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' )
            kv_bias = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' )
            # next, add keys and values (in that order) to the state dict
            state_dict[F'''segformer.encoder.block.{i}.{j}.attention.self.key.weight'''] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F'''segformer.encoder.block.{i}.{j}.attention.self.key.bias'''] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F'''segformer.encoder.block.{i}.{j}.attention.self.value.weight'''] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F'''segformer.encoder.block.{i}.{j}.attention.self.value.bias'''] = kv_bias[
                config.hidden_sizes[i] :
            ]
def prepare_img ( )-> Optional[Any]:
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def lowercase ( A_ , A_ , A_ )-> str:
'''simple docstring'''
a : Dict = SegformerConfig()
a : List[Any] = False
# set attributes based on model_name
a : List[Any] = "huggingface/label-files"
if "segformer" in model_name:
a : List[Any] = model_name[len("segformer." ) : len("segformer." ) + 2]
if "ade" in model_name:
a : List[Any] = 150
a : Any = "ade20k-id2label.json"
a : str = (1, 150, 128, 128)
elif "city" in model_name:
a : Optional[Any] = 19
a : Any = "cityscapes-id2label.json"
a : Tuple = (1, 19, 128, 128)
else:
raise ValueError(F'''Model {model_name} not supported''' )
elif "mit" in model_name:
a : List[Any] = True
a : List[Any] = model_name[4:6]
a : str = 1_000
a : List[str] = "imagenet-1k-id2label.json"
a : Optional[int] = (1, 1_000)
else:
raise ValueError(F'''Model {model_name} not supported''' )
# set config attributes
a : Optional[int] = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="dataset" ) , "r" ) )
a : Any = {int(__snake_case ): v for k, v in idalabel.items()}
a : Union[str, Any] = idalabel
a : Any = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
a : int = [64, 128, 320, 512]
a : List[Any] = 256
elif size == "b2":
a : Tuple = [64, 128, 320, 512]
a : List[Any] = 768
a : Union[str, Any] = [3, 4, 6, 3]
elif size == "b3":
a : List[Any] = [64, 128, 320, 512]
a : Dict = 768
a : Tuple = [3, 4, 18, 3]
elif size == "b4":
a : Tuple = [64, 128, 320, 512]
a : Optional[Any] = 768
a : int = [3, 8, 27, 3]
elif size == "b5":
a : List[Any] = [64, 128, 320, 512]
a : Any = 768
a : List[str] = [3, 6, 40, 3]
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor (only resize + normalize)
a : Optional[Any] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__snake_case , align=__snake_case , do_random_crop=__snake_case )
# prepare image
a : Union[str, Any] = prepare_img()
a : Optional[Any] = image_processor(images=__snake_case , return_tensors="pt" ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
if encoder_only:
a : List[Any] = torch.load(__snake_case , map_location=torch.device("cpu" ) )
else:
a : Tuple = torch.load(__snake_case , map_location=torch.device("cpu" ) )["state_dict"]
# rename keys
a : Tuple = rename_keys(__snake_case , encoder_only=__snake_case )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(__snake_case , __snake_case )
# create HuggingFace model and load state dict
if encoder_only:
a : Tuple = False
a : str = SegformerForImageClassification(__snake_case )
else:
a : str = SegformerForSemanticSegmentation(__snake_case )
model.load_state_dict(__snake_case )
model.eval()
# forward pass
a : Optional[Any] = model(__snake_case )
a : Optional[int] = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
a : Optional[Any] = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
a : Optional[Any] = torch.tensor(
[
[[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]],
[[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]],
[[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
a : Optional[Any] = torch.tensor(
[
[[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]],
[[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]],
[[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
a : List[str] = torch.tensor(
[
[[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]],
[[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]],
[[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
a : Union[str, Any] = torch.tensor(
[
[[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]],
[[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]],
[[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
a : int = torch.tensor(
[
[[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]],
[[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]],
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
a : List[str] = torch.tensor(
[
[[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]],
[[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]],
[[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
a : Tuple = torch.tensor(
[
[[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]],
[[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]],
[[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
a : int = torch.tensor(
[
[
[-1.1372e01, -1.2787e01, -1.3477e01],
[-1.2536e01, -1.4194e01, -1.4409e01],
[-1.3217e01, -1.4888e01, -1.5327e01],
],
[
[-1.4791e01, -1.7122e01, -1.8277e01],
[-1.7163e01, -1.9192e01, -1.9533e01],
[-1.7897e01, -1.9991e01, -2.0315e01],
],
[
[7.6723e-01, 4.1921e-01, -7.7878e-02],
[4.7772e-01, 9.5557e-03, -2.8082e-01],
[3.6032e-01, -2.4826e-01, -5.1168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
a : Any = torch.tensor(
[
[[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]],
[[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]],
[[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
a : Optional[Any] = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
a : Optional[int] = torch.tensor(
[
[[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]],
[[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]],
[[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
a : Optional[int] = torch.tensor(
[
[[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]],
[[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]],
[[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
a : List[str] = torch.tensor(
[
[[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]],
[[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]],
[[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
a : Any = torch.tensor(
[
[[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]],
[[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]],
[[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]],
] )
else:
a : Union[str, Any] = logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , __snake_case , atol=1e-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
model.save_pretrained(__snake_case )
image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you\'d like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
__lowercase = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
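# Standalone sketch (an assumption, not part of the original script) of the
# split performed by read_in_k_v above: the original checkpoint stores the key
# and value projections stacked in a single (2 * hidden, hidden) matrix.
import torch

hidden = 64
kv_weight = torch.randn(2 * hidden, hidden)
k_weight = kv_weight[:hidden, :]   # first half  -> key projection
v_weight = kv_weight[hidden:, :]   # second half -> value projection
assert k_weight.shape == v_weight.shape == (hidden, hidden)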
| 364
|
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowercase = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
__lowercase = direct_transformers_import(PATH_TO_TRANSFORMERS)
__lowercase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
__lowercase = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def lowercase ( A_ , A_ , A_ , A_ )-> Optional[Any]:
'''simple docstring'''
a : List[str] = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F'''config.{attribute}''' in modeling_source
or F'''getattr(config, "{attribute}"''' in modeling_source
or F'''getattr(self.config, "{attribute}"''' in modeling_source
):
a : str = True
# Deal with multi-line cases
elif (
re.search(
RF'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' , A_ , )
is not None
):
a : List[Any] = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
a : str = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
a : Tuple = [
"bos_index",
"eos_index",
"pad_index",
"unk_index",
"mask_index",
"image_size",
"use_cache",
"out_features",
"out_indices",
]
a : str = ["encoder_no_repeat_ngram_size"]
# Special cases to be allowed
a : int = True
if not attribute_used:
a : Dict = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
a : Optional[int] = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
a : List[Any] = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
a : str = True
elif attribute.endswith("_token_id" ):
a : str = True
# configuration class specific cases
if not case_allowed:
a : Union[str, Any] = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
a : Optional[Any] = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def lowercase ( A_ )-> Tuple:
'''simple docstring'''
a : Optional[int] = dict(inspect.signature(config_class.__init__ ).parameters )
a : Any = [x for x in list(signature.keys() ) if x not in ["self", "kwargs"]]
a : str = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
a : Dict = {}
if len(config_class.attribute_map ) > 0:
a : int = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
a : int = inspect.getsourcefile(A_ )
a : Union[str, Any] = os.path.dirname(A_ )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
a : Optional[Any] = [os.path.join(A_ , A_ ) for fn in os.listdir(A_ ) if fn.startswith("modeling_" )]
# Get the source code strings
a : Tuple = []
for path in modeling_paths:
if os.path.isfile(A_ ):
with open(A_ ) as fp:
modeling_sources.append(fp.read() )
a : Optional[Any] = []
for config_param, default_value in zip(A_ , A_ ):
# `attributes` here is all the variant names for `config_param`
a : str = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(A_ , A_ , A_ , A_ ):
unused_attributes.append(attributes[0] )
return sorted(A_ )
def lowercase ( )-> str:
'''simple docstring'''
a : List[Any] = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        a : str = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class ) , lambda x : inspect.isclass(x )
                and issubclass(x , PretrainedConfig )
                and inspect.getmodule(x ) == inspect.getmodule(_config_class ) , )
        ]
for config_class in config_classes_in_module:
a : Union[str, Any] = check_config_attributes_being_used(A_ )
if len(A_ ) > 0:
a : Dict = unused_attributes
if len(A_ ) > 0:
a : Union[str, Any] = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
for name, attributes in configs_with_unused_attributes.items():
error += F'''{name}: {attributes}\n'''
raise ValueError(A_ )
if __name__ == "__main__":
check_config_attributes()
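# Quick demo (an assumption, not in the original script) of the multi-line
# `getattr(config, "xxx"` pattern matched above; it catches usages that a
# code formatter has split across lines.
import re

src = 'value = getattr(\n    self.config, "hidden_size", 768\n)'
pattern = r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"hidden_size"'
print(re.search(pattern, src) is not None)  # True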
| 226
| 0
|
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {'UserAgent': UserAgent().random}
def extract_user_profile ( script ) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"' ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser :
    def __init__( self , username ) -> None:
        '''simple docstring'''
        self.url = F"""https://www.instagram.com/{username}/"""
        self.user_data = self.get_json()
    def get_json( self ) -> dict:
        '''simple docstring'''
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , 'html.parser' ).find_all('script' )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )
    def __repr__( self ) -> str:
        '''simple docstring'''
        return F"""{self.__class__.__name__}('{self.username}')"""
    def __str__( self ) -> str:
        '''simple docstring'''
        return F"""{self.fullname} ({self.username}) is {self.biography}"""
    @property
    def username( self ) -> str:
        '''simple docstring'''
        return self.user_data["username"]
    @property
    def fullname( self ) -> str:
        '''simple docstring'''
        return self.user_data["full_name"]
    @property
    def biography( self ) -> str:
        '''simple docstring'''
        return self.user_data["biography"]
    @property
    def email( self ) -> str:
        '''simple docstring'''
        return self.user_data["business_email"]
    @property
    def website( self ) -> str:
        '''simple docstring'''
        return self.user_data["external_url"]
    @property
    def number_of_followers( self ) -> int:
        '''simple docstring'''
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings( self ) -> int:
        '''simple docstring'''
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts( self ) -> int:
        '''simple docstring'''
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url( self ) -> str:
        '''simple docstring'''
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified( self ) -> bool:
        '''simple docstring'''
        return self.user_data["is_verified"]
    @property
    def is_private( self ) -> bool:
        '''simple docstring'''
        return self.user_data["is_private"]
def UpperCamelCase__ ( A__ = "github" ) -> None:
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
snake_case__ : int = InstagramUser(__A )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , __A )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 12_0000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser('''github''')
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
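# Tiny sketch (an assumption, not part of the original script) of the slicing
# done in extract_user_profile above: the profile JSON is embedded in a script
# tag as `window._sharedData = {...};`, so the code cuts from the first
# '{"config"' to the trailing semicolon.
data = 'window._sharedData = {"config": {}, "entry_data": {}};'
print(data[data.find('{"config"') : -1])  # {"config": {}, "entry_data": {}}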
| 143
|
import flax.linen as nn
import jax
import jax.numpy as jnp
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
snake_case__ : int
snake_case__ : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
a_ : Dict = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
        batch , height , width , channels = hidden_states.shape
a_ : List[str] = jax.image.resize(
SCREAMING_SNAKE_CASE__ , shape=(batch, height * 2, width * 2, channels) , method='nearest' , )
a_ : Any = self.conv(SCREAMING_SNAKE_CASE__ )
return hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
snake_case__ : int
snake_case__ : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
a_ : Optional[int] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Any , SCREAMING_SNAKE_CASE__ : int ) -> Optional[int]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
a_ : str = self.conv(SCREAMING_SNAKE_CASE__ )
return hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
snake_case__ : int
snake_case__ : int = None
snake_case__ : float = 0.0
snake_case__ : bool = None
snake_case__ : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : List[str] = self.in_channels if self.out_channels is None else self.out_channels
a_ : Optional[int] = nn.GroupNorm(num_groups=3_2 , epsilon=1E-5 )
a_ : Any = nn.Conv(
SCREAMING_SNAKE_CASE__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
a_ : Optional[int] = nn.Dense(SCREAMING_SNAKE_CASE__ , dtype=self.dtype )
a_ : Union[str, Any] = nn.GroupNorm(num_groups=3_2 , epsilon=1E-5 )
a_ : int = nn.Dropout(self.dropout_prob )
a_ : Optional[Any] = nn.Conv(
SCREAMING_SNAKE_CASE__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
a_ : List[str] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
a_ : List[Any] = None
if use_nin_shortcut:
a_ : Union[str, Any] = nn.Conv(
SCREAMING_SNAKE_CASE__ , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , )
def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any]=True ) -> int:
a_ : List[Any] = hidden_states
a_ : Any = self.norma(SCREAMING_SNAKE_CASE__ )
a_ : Any = nn.swish(SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = self.conva(SCREAMING_SNAKE_CASE__ )
a_ : int = self.time_emb_proj(nn.swish(SCREAMING_SNAKE_CASE__ ) )
a_ : List[str] = jnp.expand_dims(jnp.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) , 1 )
a_ : Optional[int] = hidden_states + temb
a_ : List[str] = self.norma(SCREAMING_SNAKE_CASE__ )
a_ : Tuple = nn.swish(SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = self.dropout(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = self.conva(SCREAMING_SNAKE_CASE__ )
if self.conv_shortcut is not None:
a_ : List[str] = self.conv_shortcut(SCREAMING_SNAKE_CASE__ )
return hidden_states + residual
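# Added usage sketch for the blocks above (class names as restored here):
# doubling a 16x16 NHWC feature map with the upsampling block.
if __name__ == "__main__":
    rng = jax.random.PRNGKey(0)
    block = FlaxUpsample2D(out_channels=32)
    x = jnp.ones((1, 16, 16, 32))
    params = block.init(rng, x)
    y = block.apply(params, x)
    assert y.shape == (1, 32, 32, 32)  # spatial dims doubled, channels preserved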
| 32
| 0
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    """Construct an ALBERT tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
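# Added usage sketch (the `spiece.model` path below is hypothetical; a real
# SentencePiece model file is required):
#
# tokenizer = AlbertTokenizer(vocab_file="spiece.model")
# tokens = tokenizer._tokenize("Hello, world!")
# ids = [tokenizer._convert_token_to_id(t) for t in tokens]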
| 14
|
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """Weighted directed graph edge."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """0-1 BFS: edges of weight 0 go to the front of the deque, weight 1 to the back."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
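    # Added usage sketch: shortest path in a 0-1 weighted graph using the
    # deque-based BFS above.
    g = AdjacencyList(4)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    g.add_edge(0, 3, 1)
    g.add_edge(3, 2, 0)
    assert g.get_shortest_path(0, 2) == 1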
| 14
| 1
|
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Undirected unweighted graph for running the Markov chain algorithm."""

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """Run `steps` random transitions from `start` and count node visits."""
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
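    # Added usage sketch: a short random walk over a tiny two-node chain.
    transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
    print(get_transitions("a", transitions, 100))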
| 231
|
import requests
_A = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def lowerCamelCase__ ( __lowerCAmelCase : str ):
"""simple docstring"""
lowerCAmelCase_ = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page["articles"] , 1 ):
print(F"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
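# Added offline sketch: the same formatting loop against a canned payload
# (hypothetical data), so it can be tried without an API key or network access:
#
# sample_page = {"articles": [{"title": "Example headline"}, {"title": "Another story"}]}
# for i, article in enumerate(sample_page["articles"], 1):
#     print(f"{i}.) {article['title']}")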
| 231
| 1
|
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
| 106
|
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
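# Added usage sketch: a concrete test case binds `block_class` and `block_type`
# and inherits the mixin's checks (the block import below is illustrative):
#
# from diffusers.models.unet_2d_blocks import DownBlock2D
#
# class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#     block_class = DownBlock2D
#     block_type = "down"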
| 106
| 1
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class a__ ( lowercase_, lowercase_, unittest.TestCase ):
__lowerCAmelCase = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
__lowerCAmelCase = (
{"""feature-extraction""": SwiftFormerModel, """image-classification""": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def setUp(self):
    self.model_tester = SwiftFormerModelTester(self)
    self.config_tester = ConfigTester(
        self, config_class=SwiftFormerConfig, has_text_modality=False, hidden_size=37, num_attention_heads=12, num_hidden_layers=12
    )
def __magic_name__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def __magic_name__ ( self ):
pass
def __magic_name__ ( self ):
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Any = model_class(__UpperCamelCase )
lowercase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def __magic_name__ ( self ):
lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Any = model_class(__UpperCamelCase )
lowercase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : Tuple = [*signature.parameters.keys()]
lowercase : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __magic_name__ ( self ):
lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __magic_name__ ( self ):
lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def __magic_name__ ( self ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : str = SwiftFormerModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def __magic_name__ ( self ):
pass
def __magic_name__ ( self ):
def check_hidden_states_output(inputs_dict, config, model_class):
    model = model_class(config)
    model.to(torch_device)
    model.eval()

    with torch.no_grad():
        outputs = model(**self._prepare_for_class(inputs_dict, model_class))

    hidden_states = outputs.hidden_states

    expected_num_stages = 8
    self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

    # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
    # with the width and height being successively divided by 2, after every 2 blocks
    for i in range(len(hidden_states)):
        self.assertEqual(
            hidden_states[i].shape,
            torch.Size(
                [
                    self.model_tester.batch_size,
                    self.model_tester.embed_dims[i // 2],
                    (self.model_tester.image_size // 4) // 2 ** (i // 2),
                    (self.model_tester.image_size // 4) // 2 ** (i // 2),
                ]
            ),
        )

config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

for model_class in self.all_model_classes:
    inputs_dict["output_hidden_states"] = True
    check_hidden_states_output(inputs_dict, config, model_class)

    # check that output_hidden_states also work using config
    del inputs_dict["output_hidden_states"]
    config.output_hidden_states = True

    check_hidden_states_output(inputs_dict, config, model_class)
def __magic_name__ ( self ):
def _config_zero_init(_a ):
lowercase : Optional[Any] = copy.deepcopy(__UpperCamelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(__UpperCamelCase , __UpperCamelCase , 1E-10 )
if isinstance(getattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase ):
lowercase : Optional[Any] = _config_zero_init(getattr(__UpperCamelCase , __UpperCamelCase ) )
setattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return configs_no_init
lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Union[str, Any] = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
lowercase : Tuple = model_class(config=__UpperCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __magic_name__ ( self ):
pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
@cached_property
def default_image_processor(self):
    return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None
@slow
def __magic_name__ ( self ):
lowercase : Dict = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(__UpperCamelCase )
lowercase : List[Any] = self.default_image_processor
lowercase : Dict = prepare_img()
lowercase : int = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
lowercase : Union[str, Any] = model(**__UpperCamelCase )
# verify the logits
lowercase : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
lowercase : Optional[Any] = torch.tensor([[-2.1703E00, 2.1107E00, -2.0811E00]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
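# Added sketch: the same checkpoint can be exercised outside the test harness
# (requires network access; the checkpoint name is taken from the tests above):
#
# model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")
# processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
# inputs = processor(images=prepare_img(), return_tensors="pt")
# logits = model(**inputs).logits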
| 202
|
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with given input with regard to the model."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records


@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        """Checks whether there might be something wrong with given input with regard to the model."""
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length // 2})"
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
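# Added usage sketch: these pipelines are normally built through the `pipeline`
# factory (the model name below is illustrative; requires network access):
#
# from transformers import pipeline
# summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
# print(summarizer("A long article ...", max_length=56)[0]["summary_text"])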
| 213
| 0
|
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """DO NOT CHANGE. This function computes and logs the result metrics."""

    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """DO ADAPT FOR YOUR USE CASE. This function normalizes the target text."""

    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
args = parser.parse_args()
main(args)
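    # Example invocation (model/dataset identifiers below are illustrative):
    # python eval.py --model_id facebook/wav2vec2-base-960h \
    #     --dataset mozilla-foundation/common_voice_8_0 --config en --split test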
| 364
|
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
UpperCAmelCase : Optional[int] = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states


@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_result - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res


@add_start_docstrings(
    "Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ",
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
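# Added usage sketch: patience-based early exit at inference time (the model
# name below is illustrative; `set_patience` is defined on BertModelWithPabee above):
#
# model = BertForSequenceClassificationWithPabee.from_pretrained("bert-base-uncased", num_labels=2)
# model.bert.set_patience(3)  # exit once 3 consecutive layers agree
# model.eval()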
| 313
| 0
|
def decimal_isolate(number: float, digit_amount: int) -> float:
    """
    Isolates the decimal part of a number.
    If digit_amount > 0, rounds to that decimal place; otherwise returns the entire decimal part.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 53
|
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout
def snake_case__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : List[str] = None
if self.use_labels:
__UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__UpperCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def snake_case__ ( self : Union[str, Any] ):
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def snake_case__ ( self : Optional[Any] , a_ : Dict , a_ : List[Any] , a_ : Dict , a_ : Tuple ):
'''simple docstring'''
__UpperCAmelCase : List[str] = MobileViTVaModel(config=a_ )
model.to(a_ )
model.eval()
__UpperCAmelCase : Tuple = model(a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def snake_case__ ( self : Union[str, Any] , a_ : Dict , a_ : Union[str, Any] , a_ : str , a_ : List[str] ):
'''simple docstring'''
__UpperCAmelCase : Dict = self.num_labels
__UpperCAmelCase : List[str] = MobileViTVaForImageClassification(a_ )
model.to(a_ )
model.eval()
__UpperCAmelCase : List[Any] = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : str , a_ : List[str] , a_ : Optional[Any] , a_ : List[str] , a_ : int ):
'''simple docstring'''
__UpperCAmelCase : Any = self.num_labels
__UpperCAmelCase : List[Any] = MobileViTVaForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
__UpperCAmelCase : Tuple = model(a_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__UpperCAmelCase : List[str] = model(a_ , labels=a_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def snake_case__ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = config_and_inputs
__UpperCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
"""feature-extraction""": MobileViTVaModel,
"""image-classification""": MobileViTVaForImageClassification,
"""image-segmentation""": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def setUp(self):
    self.model_tester = MobileViTVaModelTester(self)
    self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)
def snake_case__ ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def snake_case__ ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def snake_case__ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def snake_case__ ( self : Tuple ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def snake_case__ ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case__ ( self : Optional[Any] ):
'''simple docstring'''
pass
def snake_case__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Any = model_class(a_ )
__UpperCAmelCase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Any = [*signature.parameters.keys()]
__UpperCAmelCase : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a_ )
def snake_case__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def snake_case__ ( self : int ):
'''simple docstring'''
def check_hidden_states_output(inputs_dict, config, model_class):
    model = model_class(config)
    model.to(torch_device)
    model.eval()

    with torch.no_grad():
        outputs = model(**self._prepare_for_class(inputs_dict, model_class))

    hidden_states = outputs.hidden_states

    expected_num_stages = 5
    self.assertEqual(len(hidden_states), expected_num_stages)

    # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
    # with the width and height being successively divided by 2.
    divisor = 2
    for i in range(len(hidden_states)):
        self.assertListEqual(
            list(hidden_states[i].shape[-2:]),
            [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
        )
        divisor *= 2

    self.assertEqual(self.model_tester.output_stride, divisor // 2)

config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

for model_class in self.all_model_classes:
    inputs_dict["output_hidden_states"] = True
    check_hidden_states_output(inputs_dict, config, model_class)

    # check that output_hidden_states also work using config
    del inputs_dict["output_hidden_states"]
    config.output_hidden_states = True

    check_hidden_states_output(inputs_dict, config, model_class)
def snake_case__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
def snake_case__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
@slow
def snake_case__ ( self : Optional[int] ):
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[int] = MobileViTVaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def default_image_processor(self):
    return (
        MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
        if is_vision_available()
        else None
    )
@slow
def snake_case__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
a_ )
__UpperCAmelCase : Optional[int] = self.default_image_processor
__UpperCAmelCase : int = prepare_img()
__UpperCAmelCase : Union[str, Any] = image_processor(images=a_ , return_tensors='''pt''' ).to(a_ )
# forward pass
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(**a_ )
# verify the logits
__UpperCAmelCase : str = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , a_ )
__UpperCAmelCase : Any = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 ) )
@slow
def snake_case__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : int = MobileViTV2ForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__UpperCAmelCase : int = model.to(a_ )
__UpperCAmelCase : str = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__UpperCAmelCase : List[str] = prepare_img()
__UpperCAmelCase : Tuple = image_processor(images=a_ , return_tensors='''pt''' ).to(a_ )
# forward pass
with torch.no_grad():
__UpperCAmelCase : Dict = model(**a_ )
__UpperCAmelCase : int = outputs.logits
# verify the logits
__UpperCAmelCase : Union[str, Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , a_ )
__UpperCAmelCase : List[Any] = torch.tensor(
[
[[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
[[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
[[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
] , device=a_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , a_ , atol=1e-4 ) )
@slow
def snake_case__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : Tuple = MobileViTV2ForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__UpperCAmelCase : Optional[int] = model.to(a_ )
__UpperCAmelCase : List[Any] = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__UpperCAmelCase : Optional[int] = prepare_img()
__UpperCAmelCase : Dict = image_processor(images=a_ , return_tensors='''pt''' ).to(a_ )
# forward pass
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(**a_ )
__UpperCAmelCase : Optional[Any] = outputs.logits.detach().cpu()
__UpperCAmelCase : List[Any] = image_processor.post_process_semantic_segmentation(outputs=a_ , target_sizes=[(50, 60)] )
__UpperCAmelCase : Tuple = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , a_ )
__UpperCAmelCase : List[Any] = image_processor.post_process_semantic_segmentation(outputs=a_ )
__UpperCAmelCase : Optional[int] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , a_ )
| 226
| 0
|
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__snake_case : Tuple = """0.12""" # assumed parallelism: 8
@require_flax
@is_staging_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
@classmethod
def UpperCamelCase__ ( cls ):
"""simple docstring"""
lowerCAmelCase__ = TOKEN
HfFolder.save_token(_UpperCamelCase )
@classmethod
def UpperCamelCase__ ( cls ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-model-flax' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-model-flax-org' )
except HTTPError:
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
lowerCAmelCase__ = FlaxBertModel(_UpperCamelCase )
model.push_to_hub('test-model-flax' , use_auth_token=self._token )
lowerCAmelCase__ = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
lowerCAmelCase__ = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_UpperCamelCase , 1E-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='test-model-flax' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_UpperCamelCase , repo_id='test-model-flax' , push_to_hub=_UpperCamelCase , use_auth_token=self._token )
lowerCAmelCase__ = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
lowerCAmelCase__ = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_UpperCamelCase , 1E-3 , msg=F"{key} not identical" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
lowerCAmelCase__ = FlaxBertModel(_UpperCamelCase )
model.push_to_hub('valid_org/test-model-flax-org' , use_auth_token=self._token )
lowerCAmelCase__ = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
lowerCAmelCase__ = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_UpperCamelCase , 1E-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-model-flax-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_UpperCamelCase , repo_id='valid_org/test-model-flax-org' , push_to_hub=_UpperCamelCase , use_auth_token=self._token )
lowerCAmelCase__ = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
lowerCAmelCase__ = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_UpperCamelCase , 1E-3 , msg=F"{key} not identical" )
def check_models_equal(model_a, model_b) -> bool:
    """Return True if every parameter of the two Flax models matches within 1e-4."""
    models_are_equal = True
    flat_params_a = flatten_dict(model_a.params)
    flat_params_b = flatten_dict(model_b.params)
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
lowerCAmelCase__ = FlaxBertModel(_UpperCamelCase )
lowerCAmelCase__ = 'bert'
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_UpperCamelCase , _UpperCamelCase ) )
with self.assertRaises(_UpperCamelCase ):
lowerCAmelCase__ = FlaxBertModel.from_pretrained(_UpperCamelCase )
lowerCAmelCase__ = FlaxBertModel.from_pretrained(_UpperCamelCase , subfolder=_UpperCamelCase )
self.assertTrue(check_models_equal(_UpperCamelCase , _UpperCamelCase ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
lowerCAmelCase__ = FlaxBertModel(_UpperCamelCase )
lowerCAmelCase__ = 'bert'
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_UpperCamelCase , _UpperCamelCase ) , max_shard_size='10KB' )
with self.assertRaises(_UpperCamelCase ):
lowerCAmelCase__ = FlaxBertModel.from_pretrained(_UpperCamelCase )
lowerCAmelCase__ = FlaxBertModel.from_pretrained(_UpperCamelCase , subfolder=_UpperCamelCase )
self.assertTrue(check_models_equal(_UpperCamelCase , _UpperCamelCase ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = 'bert'
lowerCAmelCase__ = 'hf-internal-testing/tiny-random-bert-subfolder'
with self.assertRaises(_UpperCamelCase ):
lowerCAmelCase__ = FlaxBertModel.from_pretrained(_UpperCamelCase )
lowerCAmelCase__ = FlaxBertModel.from_pretrained(_UpperCamelCase , subfolder=_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = 'bert'
lowerCAmelCase__ = 'hf-internal-testing/tiny-random-bert-sharded-subfolder'
with self.assertRaises(_UpperCamelCase ):
lowerCAmelCase__ = FlaxBertModel.from_pretrained(_UpperCamelCase )
lowerCAmelCase__ = FlaxBertModel.from_pretrained(_UpperCamelCase , subfolder=_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
| 122
|
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 122
| 1
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {"""vocab_file""": """spiece.model"""}
_lowerCamelCase : Any = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
_lowerCamelCase : Any = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
_lowerCamelCase : Optional[int] = """▁"""
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Dict="[CLS]" , UpperCAmelCase__ : int="[SEP]" , UpperCAmelCase__ : int="<unk>" , UpperCAmelCase__ : Tuple="[SEP]" , UpperCAmelCase__ : List[str]="<pad>" , UpperCAmelCase__ : List[str]="[CLS]" , UpperCAmelCase__ : Union[str, Any]="[MASK]" , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : Any , ) ->None:
'''simple docstring'''
A__ = (
AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ , normalized=UpperCAmelCase__)
if isinstance(UpperCAmelCase__ , UpperCAmelCase__)
else mask_token
)
A__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCAmelCase__ , remove_space=UpperCAmelCase__ , keep_accents=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
A__ = do_lower_case
A__ = remove_space
A__ = keep_accents
A__ = vocab_file
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(UpperCAmelCase__)
@property
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]:
'''simple docstring'''
return len(self.sp_model)
def SCREAMING_SNAKE_CASE ( self : int) ->Optional[Any]:
'''simple docstring'''
A__ = {self.convert_ids_to_tokens(UpperCAmelCase__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
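# SentencePiece processors are not picklable: __getstate__ below drops the
# processor from the instance state and __setstate__ rebuilds it from the vocab file.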
def __getstate__( self : Optional[int]) ->Dict:
'''simple docstring'''
A__ = self.__dict__.copy()
A__ = None
return state
def __setstate__( self : List[Any] , UpperCAmelCase__ : List[Any]) ->int:
'''simple docstring'''
A__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
A__ = {}
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : int) ->int:
'''simple docstring'''
if self.remove_space:
A__ = ''' '''.join(inputs.strip().split())
else:
A__ = inputs
A__ = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
if not self.keep_accents:
A__ = unicodedata.normalize('''NFKD''' , UpperCAmelCase__)
A__ = ''''''.join([c for c in outputs if not unicodedata.combining(UpperCAmelCase__)])
if self.do_lower_case:
A__ = outputs.lower()
return outputs
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : str) ->List[str]:
'''simple docstring'''
A__ = self.preprocess_text(UpperCAmelCase__)
A__ = self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__)
A__ = []
for piece in pieces:
if len(UpperCAmelCase__) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
A__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase__ , ''''''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
A__ = cur_pieces[1:]
else:
A__ = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(UpperCAmelCase__)
else:
new_pieces.append(UpperCAmelCase__)
return new_pieces
def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : int) ->str:
'''simple docstring'''
return self.sp_model.PieceToId(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : Optional[Any]) ->int:
'''simple docstring'''
return self.sp_model.IdToPiece(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Dict) ->List[Any]:
'''simple docstring'''
A__ = []
A__ = ''''''
A__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCAmelCase__) + token
A__ = True
A__ = []
else:
current_sub_tokens.append(UpperCAmelCase__)
A__ = False
out_string += self.sp_model.decode(UpperCAmelCase__)
return out_string.strip()
def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False) ->List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__)
if token_ids_a is not None:
return [1] + ([0] * len(UpperCAmelCase__)) + [1] + ([0] * len(UpperCAmelCase__)) + [1]
return [1] + ([0] * len(UpperCAmelCase__)) + [1]
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase__):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
A__ = os.path.join(
UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , UpperCAmelCase__)
elif not os.path.isfile(self.vocab_file):
with open(UpperCAmelCase__ , '''wb''') as fi:
A__ = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__)
return (out_vocab_file,)
| 14
|
def nor_gate(input_1: int, input_2: int) -> int:
    """Return 1 only if both inputs are 0 (logical NOR)."""
    return int(input_1 == input_2 == 0)
def main() -> None:
    """Print the truth table of the NOR gate."""
    print('Truth Table of NOR Gate:')
    print('| Input 1 | Input 2 | Output |')
    print(f'| 0 | 0 | {nor_gate(0, 0)} |')
    print(f'| 0 | 1 | {nor_gate(0, 1)} |')
    print(f'| 1 | 0 | {nor_gate(1, 0)} |')
    print(f'| 1 | 1 | {nor_gate(1, 1)} |')
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 14
| 1
|
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowerCamelCase = logging.get_logger(__name__)
class a ( _A ):
'''simple docstring'''
def __init__( self : int , *__snake_case : Optional[int] , **__snake_case : List[str] ):
warnings.warn(
'''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DonutImageProcessor instead.''' , __snake_case , )
super().__init__(*__snake_case , **__snake_case )
| 177
|
import base64
def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string with Base85 (b85)."""
    return base64.b85encode(string.encode('utf-8'))
def base85_decode(encoded: bytes) -> str:
    """Decode Base85 (b85) bytes back to a UTF-8 string."""
    return base64.b85decode(encoded).decode('utf-8')
if __name__ == "__main__":
    test = 'Hello World!'
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
| 177
| 1
|
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
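# Maps a key from the original EfficientFormer checkpoint to the transformers
# naming scheme (e.g. a "patch_embed.0" weight becomes a "convolution1" weight).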
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
lowerCAmelCase__ : List[str] = old_name
if "patch_embed" in old_name:
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = old_name.split('''.''' )
if layer == "0":
lowerCAmelCase__ : str = old_name.replace('''0''' , '''convolution1''' )
elif layer == "1":
lowerCAmelCase__ : List[Any] = old_name.replace('''1''' , '''batchnorm_before''' )
elif layer == "3":
lowerCAmelCase__ : Optional[Any] = old_name.replace('''3''' , '''convolution2''' )
else:
lowerCAmelCase__ : Optional[int] = old_name.replace('''4''' , '''batchnorm_after''' )
if "network" in old_name and re.search(r'''\d\.\d''' , A_ ):
lowerCAmelCase__ : int = r'''\b\d{2}\b'''
if bool(re.search(A_ , A_ ) ):
lowerCAmelCase__ : Union[str, Any] = re.search(r'''\d\.\d\d.''' , A_ ).group()
else:
lowerCAmelCase__ : Tuple = re.search(r'''\d\.\d.''' , A_ ).group()
if int(match[0] ) < 6:
lowerCAmelCase__ : Tuple = old_name.replace(A_ , '''''' )
lowerCAmelCase__ : int = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
lowerCAmelCase__ : str = '''intermediate_stages.''' + trimmed_name
else:
lowerCAmelCase__ : Dict = old_name.replace(A_ , '''''' )
if int(match[2] ) < num_meta4D_last_stage:
lowerCAmelCase__ : str = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
else:
lowerCAmelCase__ : List[Any] = str(int(match[2] ) - num_meta4D_last_stage )
lowerCAmelCase__ : Tuple = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
lowerCAmelCase__ : Any = trimmed_name.replace('''norm1''' , '''layernorm1''' )
elif "norm2" in old_name:
lowerCAmelCase__ : str = trimmed_name.replace('''norm2''' , '''layernorm2''' )
elif "fc1" in old_name:
lowerCAmelCase__ : int = trimmed_name.replace('''fc1''' , '''linear_in''' )
elif "fc2" in old_name:
lowerCAmelCase__ : int = trimmed_name.replace('''fc2''' , '''linear_out''' )
lowerCAmelCase__ : Any = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(r'''.\d.''' , A_ ):
lowerCAmelCase__ : Tuple = old_name.replace('''network''' , '''intermediate_stages''' )
if "fc" in new_name:
lowerCAmelCase__ : Union[str, Any] = new_name.replace('''fc''' , '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
lowerCAmelCase__ : int = new_name.replace('''norm1''' , '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
lowerCAmelCase__ : str = new_name.replace('''norm2''' , '''batchnorm_after''' )
if "proj" in new_name:
lowerCAmelCase__ : Tuple = new_name.replace('''proj''' , '''projection''' )
if "dist_head" in new_name:
lowerCAmelCase__ : Any = new_name.replace('''dist_head''' , '''distillation_classifier''' )
elif "head" in new_name:
lowerCAmelCase__ : Tuple = new_name.replace('''head''' , '''classifier''' )
elif "patch_embed" in new_name:
lowerCAmelCase__ : Optional[int] = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
lowerCAmelCase__ : Optional[int] = new_name.replace('''norm''' , '''layernorm''' )
lowerCAmelCase__ : Tuple = '''efficientformer.''' + new_name
else:
lowerCAmelCase__ : Dict = '''efficientformer.encoder.''' + new_name
return new_name
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
for key in checkpoint.copy().keys():
lowerCAmelCase__ : List[Any] = checkpoint.pop(A_ )
lowerCAmelCase__ : List[str] = val
return checkpoint
def __SCREAMING_SNAKE_CASE ( ):
lowerCAmelCase__ : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCAmelCase__ : Union[str, Any] = Image.open(requests.get(A_ , stream=A_ ).raw )
return image
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ):
lowerCAmelCase__ : Tuple = torch.load(A_ , map_location='''cpu''' )['''model''']
lowerCAmelCase__ : int = EfficientFormerConfig.from_json_file(A_ )
lowerCAmelCase__ : Tuple = EfficientFormerForImageClassificationWithTeacher(A_ )
lowerCAmelCase__ : Dict = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
lowerCAmelCase__ : Dict = config.depths[-1] - config.num_meta3d_blocks + 1
lowerCAmelCase__ : Any = convert_torch_checkpoint(A_ , A_ )
model.load_state_dict(A_ )
model.eval()
lowerCAmelCase__ : List[str] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
lowerCAmelCase__ : Union[str, Any] = prepare_img()
lowerCAmelCase__ : Union[str, Any] = 2_56
lowerCAmelCase__ : List[str] = 2_24
lowerCAmelCase__ : int = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
lowerCAmelCase__ : Optional[int] = processor(images=A_ , return_tensors='''pt''' ).pixel_values
# original processing pipeline
lowerCAmelCase__ : Optional[int] = Compose(
[
Resize(A_ , interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(A_ ),
ToTensor(),
Normalize(A_ , A_ ),
] )
lowerCAmelCase__ : List[str] = image_transforms(A_ ).unsqueeze(0 )
assert torch.allclose(A_ , A_ )
lowerCAmelCase__ : Dict = model(A_ )
lowerCAmelCase__ : int = outputs.logits
lowerCAmelCase__ : Tuple = (1, 10_00)
if "l1" in model_name:
lowerCAmelCase__ : Optional[Any] = torch.Tensor(
[-0.1_312, 0.4_353, -1.0_499, -0.5_124, 0.4_183, -0.6_793, -1.3_777, -0.0_893, -0.7_358, -2.4_328] )
assert torch.allclose(logits[0, :10] , A_ , atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
lowerCAmelCase__ : Optional[Any] = torch.Tensor(
[-1.3_150, -1.5_456, -1.2_556, -0.8_496, -0.7_127, -0.7_897, -0.9_728, -0.3_052, 0.3_751, -0.3_127] )
assert torch.allclose(logits[0, :10] , A_ , atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
lowerCAmelCase__ : Dict = torch.Tensor(
[-1.0_283, -1.4_131, -0.5_644, -1.3_115, -0.5_785, -1.2_049, -0.7_528, 0.1_992, -0.3_822, -0.0_878] )
assert logits.shape == expected_shape
else:
raise ValueError(
f'Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7' )
# Save Checkpoints
Path(A_ ).mkdir(exist_ok=A_ )
model.save_pretrained(A_ )
print(f'Checkpoint successfully converted. Model saved at {pytorch_dump_path}' )
processor.save_pretrained(A_ )
print(f'Processor successfully saved at {pytorch_dump_path}' )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=f'Bearnardd/{pytorch_dump_path}' , commit_message='''Add model''' , use_temp_dir=A_ , )
processor.push_to_hub(
repo_id=f'Bearnardd/{pytorch_dump_path}' , commit_message='''Add image processor''' , use_temp_dir=A_ , )
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
__UpperCamelCase : str = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 106
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCamelCase : Optional[Any] = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
__UpperCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 106
| 1
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def lowerCAmelCase__ ( a__: Optional[int] , a__: int , a__: Tuple , a__: List[str] , a__: Dict=True , a__: Optional[int]="pt" ) -> Any:
'''simple docstring'''
_UpperCAmelCase = {'add_prefix_space': True} if isinstance(a__ , a__ ) and not line.startswith(' ' ) else {}
_UpperCAmelCase = padding_side
return tokenizer(
[line] , max_length=a__ , padding='max_length' if pad_to_max_length else None , truncation=a__ , return_tensors=a__ , add_special_tokens=a__ , **a__ , )
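# Trim padding-only columns from a batch: a column is kept only if at least one
# sequence in the batch has a non-pad token at that position.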
def lowerCAmelCase__ ( a__: List[Any] , a__: Dict , a__: Union[str, Any]=None , ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = input_ids.ne(a__ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __a ( UpperCAmelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="train" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="" , ) -> Dict:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = Path(_SCREAMING_SNAKE_CASE ).joinpath(type_path + '.source' )
_UpperCAmelCase = Path(_SCREAMING_SNAKE_CASE ).joinpath(type_path + '.target' )
_UpperCAmelCase = self.get_char_lens(self.src_file )
_UpperCAmelCase = max_source_length
_UpperCAmelCase = max_target_length
assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}'''
_UpperCAmelCase = tokenizer
_UpperCAmelCase = prefix
if n_obs is not None:
_UpperCAmelCase = self.src_lens[:n_obs]
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
def __len__( self ) -> Union[str, Any]:
"""simple docstring"""
return len(self.src_lens )
def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = index + 1 # linecache starts at 1
_UpperCAmelCase = self.prefix + linecache.getline(str(self.src_file ) , _SCREAMING_SNAKE_CASE ).rstrip('\n' )
_UpperCAmelCase = linecache.getline(str(self.tgt_file ) , _SCREAMING_SNAKE_CASE ).rstrip('\n' )
assert source_line, f'''empty source line for index {index}'''
assert tgt_line, f'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer , _SCREAMING_SNAKE_CASE ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_UpperCAmelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , _SCREAMING_SNAKE_CASE ) else self.tokenizer
)
_UpperCAmelCase = self.tokenizer.generator if isinstance(self.tokenizer , _SCREAMING_SNAKE_CASE ) else self.tokenizer
_UpperCAmelCase = encode_line(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.max_source_length , 'right' )
_UpperCAmelCase = encode_line(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.max_target_length , 'right' )
_UpperCAmelCase = source_inputs['input_ids'].squeeze()
_UpperCAmelCase = target_inputs['input_ids'].squeeze()
_UpperCAmelCase = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
return [len(_SCREAMING_SNAKE_CASE ) for x in Path(_SCREAMING_SNAKE_CASE ).open().readlines()]
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = torch.stack([x['input_ids'] for x in batch] )
_UpperCAmelCase = torch.stack([x['attention_mask'] for x in batch] )
_UpperCAmelCase = torch.stack([x['decoder_input_ids'] for x in batch] )
_UpperCAmelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , _SCREAMING_SNAKE_CASE )
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , _SCREAMING_SNAKE_CASE )
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = trim_batch(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase , _UpperCAmelCase = trim_batch(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
lowerCAmelCase__ :Tuple = getLogger(__name__)
def lowerCAmelCase__ ( a__: List[List] ) -> Any:
'''simple docstring'''
return list(itertools.chain.from_iterable(a__ ) )
def lowerCAmelCase__ ( a__: str ) -> None:
'''simple docstring'''
_UpperCAmelCase = get_git_info()
save_json(a__ , os.path.join(a__ , 'git_log.json' ) )
def lowerCAmelCase__ ( a__: Any , a__: List[str] , a__: Union[str, Any]=4 , **a__: int ) -> Optional[int]:
'''simple docstring'''
with open(a__ , 'w' ) as f:
json.dump(a__ , a__ , indent=a__ , **a__ )
def lowerCAmelCase__ ( a__: List[Any] ) -> str:
'''simple docstring'''
with open(a__ ) as f:
return json.load(a__ )
def lowerCAmelCase__ ( ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = git.Repo(search_parent_directories=a__ )
_UpperCAmelCase = {
'repo_id': str(a__ ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def lowerCAmelCase__ ( a__: Callable , a__: Iterable ) -> List:
'''simple docstring'''
return list(map(a__ , a__ ) )
def lowerCAmelCase__ ( a__: Any , a__: Dict ) -> Optional[int]:
'''simple docstring'''
with open(a__ , 'wb' ) as f:
return pickle.dump(a__ , a__ )
def lowerCAmelCase__ ( a__: List[str] ) -> Dict:
'''simple docstring'''
def remove_articles(a__: Dict ):
return re.sub(R'\b(a|an|the)\b' , ' ' , a__ )
def white_space_fix(a__: Any ):
return " ".join(text.split() )
def remove_punc(a__: Any ):
_UpperCAmelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(a__: Optional[Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(a__ ) ) ) )
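# For example, normalize_answer("The Cat's,  toy!") returns "cats toy":
# lower-cased, punctuation stripped, articles removed, whitespace collapsed.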
def lowerCAmelCase__ ( a__: Optional[Any] , a__: List[str] ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = normalize_answer(a__ ).split()
_UpperCAmelCase = normalize_answer(a__ ).split()
_UpperCAmelCase = Counter(a__ ) & Counter(a__ )
_UpperCAmelCase = sum(common.values() )
if num_same == 0:
return 0
_UpperCAmelCase = 1.0 * num_same / len(a__ )
_UpperCAmelCase = 1.0 * num_same / len(a__ )
_UpperCAmelCase = (2 * precision * recall) / (precision + recall)
return fa
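# The function above is token-overlap F1, e.g. prediction "red cat sat" vs.
# reference "red cat" shares 2 normalized tokens: precision 2/3, recall 2/2, F1 = 0.8.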
def lowerCAmelCase__ ( a__: Optional[Any] , a__: Union[str, Any] ) -> List[str]:
'''simple docstring'''
return normalize_answer(a__ ) == normalize_answer(a__ )
def lowerCAmelCase__ ( a__: List[str] , a__: List[str] ) -> Dict:
'''simple docstring'''
assert len(a__ ) == len(a__ )
_UpperCAmelCase = 0
for hypo, pred in zip(a__ , a__ ):
em += exact_match_score(a__ , a__ )
if len(a__ ) > 0:
em /= len(a__ )
return {"em": em}
def lowerCAmelCase__ ( a__: Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return model_prefix.startswith('rag' )
def lowerCAmelCase__ ( a__: int , a__: List[Any] , a__: Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_UpperCAmelCase = 'dropout_rate'
for p in extra_params:
if getattr(a__ , a__ , a__ ):
if not hasattr(a__ , a__ ) and not hasattr(a__ , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(a__ ) )
delattr(a__ , a__ )
continue
_UpperCAmelCase = p if hasattr(a__ , a__ ) else equivalent_param[p]
setattr(a__ , a__ , getattr(a__ , a__ ) )
delattr(a__ , a__ )
return hparams, config
| 185
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ :Tuple = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :List[Any] = ['''BeitFeatureExtractor''']
lowerCAmelCase__ :Optional[Any] = ['''BeitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :int = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :Tuple = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ :Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 185
| 1
|
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def lowercase ( _snake_case : int="ro" , _snake_case : Dict="en" , _snake_case : int="wmt16" , _snake_case : List[str]=None ) ->None:
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
__snake_case : Union[str, Any] = f"""{src_lang}-{tgt_lang}"""
print(f"""Converting {dataset}-{pair}""" )
__snake_case : Optional[Any] = datasets.load_dataset(_snake_case , _snake_case )
if save_dir is None:
__snake_case : int = f"""{dataset}-{pair}"""
__snake_case : Union[str, Any] = Path(_snake_case )
save_dir.mkdir(exist_ok=_snake_case )
for split in ds.keys():
print(f"""Splitting {split} with {ds[split].num_rows} records""" )
# to save to val.source, val.target like summary datasets
__snake_case : Union[str, Any] = '''val''' if split == '''validation''' else split
__snake_case : List[str] = save_dir.joinpath(f"""{fn}.source""" )
__snake_case : int = save_dir.joinpath(f"""{fn}.target""" )
__snake_case : Union[str, Any] = src_path.open('''w+''' )
__snake_case : Union[str, Any] = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__snake_case : List[str] = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 102
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a__ : Dict = logging.get_logger(__name__)
a__ : Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
a__ : str = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
a__ : Optional[int] = {
'''allenai/led-base-16384''': 16_384,
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Union[str, Any] = LEDTokenizer
__SCREAMING_SNAKE_CASE : Optional[int] = ['input_ids', 'attention_mask']
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="replace" , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase=False , _lowerCamelCase=True , **_lowerCamelCase , ) ->Union[str, Any]:
super().__init__(
_lowerCamelCase , _lowerCamelCase , tokenizer_file=_lowerCamelCase , errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase , **_lowerCamelCase , )
SCREAMING_SNAKE_CASE : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE : str = getattr(_lowerCamelCase , pre_tok_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : Optional[int] = add_prefix_space
SCREAMING_SNAKE_CASE : str = pre_tok_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE : List[Any] = '''post_processor'''
SCREAMING_SNAKE_CASE : int = getattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE : Any = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE : Optional[int] = tuple(state['''sep'''] )
if "cls" in state:
SCREAMING_SNAKE_CASE : Optional[Any] = tuple(state['''cls'''] )
SCREAMING_SNAKE_CASE : Any = False
if state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE : Union[str, Any] = add_prefix_space
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if state.get('''trim_offsets''' , _lowerCamelCase ) != trim_offsets:
SCREAMING_SNAKE_CASE : List[Any] = trim_offsets
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if changes_to_apply:
SCREAMING_SNAKE_CASE : List[str] = getattr(_lowerCamelCase , state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : List[Any] = component_class(**_lowerCamelCase )
setattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def __lowerCAmelCase ( self ) ->str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE : str = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else value
SCREAMING_SNAKE_CASE : List[Any] = value
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding:
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''is_split_into_words''' , _lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding:
SCREAMING_SNAKE_CASE : List[Any] = kwargs.get('''is_split_into_words''' , _lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->Tuple[str]:
SCREAMING_SNAKE_CASE : Any = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase )
return tuple(_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=None ) ->Any:
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->List[int]:
SCREAMING_SNAKE_CASE : Any = [self.sep_token_id]
SCREAMING_SNAKE_CASE : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = PaddingStrategy.DO_NOT_PAD , _lowerCamelCase = None , _lowerCamelCase = None , ) ->dict:
SCREAMING_SNAKE_CASE : Tuple = super()._pad(
encoded_inputs=_lowerCamelCase , max_length=_lowerCamelCase , padding_strategy=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE : Optional[Any] = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE : int = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as the other (sequential) inputs.
SCREAMING_SNAKE_CASE : Tuple = len(encoded_inputs['''global_attention_mask'''] ) != len(_lowerCamelCase )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE : int = len(_lowerCamelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE : str = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE : Optional[Any] = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 313
| 0
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def __magic_name__ ( *__lowercase : List[str] , **__lowercase : Any ) -> List[Any]:
pass
def _a( UpperCamelCase__ : List[str] ):
'''simple docstring'''
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
a_ = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def __magic_name__ ( self : Any , __lowercase : str , __lowercase : int , __lowercase : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : str =pipeline(
'''document-question-answering''' , model=__lowercase , tokenizer=__lowercase , image_processor=__lowercase )
SCREAMING_SNAKE_CASE__ : Any =INVOICE_URL
SCREAMING_SNAKE_CASE__ : Optional[int] =list(zip(*apply_tesseract(load_image(__lowercase ) , __lowercase , '''''' ) ) )
SCREAMING_SNAKE_CASE__ : Optional[int] ='''What is the placebo?'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =[
{
'''image''': load_image(__lowercase ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def __magic_name__ ( self : Any , __lowercase : int , __lowercase : Dict ) -> int:
SCREAMING_SNAKE_CASE__ : List[str] =dqa_pipeline(__lowercase , top_k=2 )
self.assertEqual(
__lowercase , [
[
{'''score''': ANY(__lowercase ), '''answer''': ANY(__lowercase ), '''start''': ANY(__lowercase ), '''end''': ANY(__lowercase )},
{'''score''': ANY(__lowercase ), '''answer''': ANY(__lowercase ), '''start''': ANY(__lowercase ), '''end''': ANY(__lowercase )},
]
]
* 3 , )
@require_torch
@require_detectron2
@require_pytesseract
def __magic_name__ ( self : Union[str, Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
SCREAMING_SNAKE_CASE__ : str =INVOICE_URL
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''How many cats are there?'''
SCREAMING_SNAKE_CASE__ : int =[
{'''score''': 0.0001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
SCREAMING_SNAKE_CASE__ : int =dqa_pipeline(image=__lowercase , question=__lowercase , top_k=2 )
self.assertEqual(nested_simplify(__lowercase , decimals=4 ) , __lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(__lowercase , decimals=4 ) , __lowercase )
# No text is detected in this image, so layoutlmv2 cannot find an answer.
# The pipeline should return an empty result.
SCREAMING_SNAKE_CASE__ : Dict ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =dqa_pipeline(image=__lowercase , question=__lowercase , top_k=2 )
self.assertEqual(__lowercase , [] )
# We can optionally pass the words and bounding boxes directly
SCREAMING_SNAKE_CASE__ : Optional[int] ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
SCREAMING_SNAKE_CASE__ : List[Any] =[]
SCREAMING_SNAKE_CASE__ : Dict =[]
SCREAMING_SNAKE_CASE__ : Any =dqa_pipeline(image=__lowercase , question=__lowercase , words=__lowercase , boxes=__lowercase , top_k=2 )
self.assertEqual(__lowercase , [] )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def __magic_name__ ( self : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[int] =pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
SCREAMING_SNAKE_CASE__ : List[str] =INVOICE_URL
SCREAMING_SNAKE_CASE__ : int ='''What is the invoice number?'''
SCREAMING_SNAKE_CASE__ : Tuple =dqa_pipeline(image=__lowercase , question=__lowercase , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
SCREAMING_SNAKE_CASE__ : Optional[int] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
SCREAMING_SNAKE_CASE__ : str =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
[
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def __magic_name__ ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Optional[int] =pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
SCREAMING_SNAKE_CASE__ : str =INVOICE_URL
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''What is the invoice number?'''
SCREAMING_SNAKE_CASE__ : List[Any] =dqa_pipeline(image=__lowercase , question=__lowercase , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
SCREAMING_SNAKE_CASE__ : Tuple =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
[
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __magic_name__ ( self : Optional[int] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__lowercase )
SCREAMING_SNAKE_CASE__ : int =pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__lowercase , revision='''3dc6de3''' , )
SCREAMING_SNAKE_CASE__ : str =INVOICE_URL
SCREAMING_SNAKE_CASE__ : int ='''What is the invoice number?'''
SCREAMING_SNAKE_CASE__ : Optional[int] =dqa_pipeline(image=__lowercase , question=__lowercase , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
SCREAMING_SNAKE_CASE__ : List[str] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
[
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
SCREAMING_SNAKE_CASE__ : List[Any] =list(zip(*apply_tesseract(load_image(__lowercase ) , __lowercase , '''''' ) ) )
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE__ : int =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Any =AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__lowercase )
SCREAMING_SNAKE_CASE__ : Any =pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__lowercase , revision='''3dc6de3''' , max_seq_len=50 , )
SCREAMING_SNAKE_CASE__ : int =INVOICE_URL
SCREAMING_SNAKE_CASE__ : Tuple ='''What is the invoice number?'''
SCREAMING_SNAKE_CASE__ : List[Any] =dqa_pipeline(image=__lowercase , question=__lowercase , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
SCREAMING_SNAKE_CASE__ : str =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
[
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
SCREAMING_SNAKE_CASE__ : Dict =list(zip(*apply_tesseract(load_image(__lowercase ) , __lowercase , '''''' ) ) )
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE__ : Optional[int] =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def __magic_name__ ( self : Dict ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict =pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
SCREAMING_SNAKE_CASE__ : List[str] =INVOICE_URL
SCREAMING_SNAKE_CASE__ : int ='''What is the invoice number?'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =dqa_pipeline(image=__lowercase , question=__lowercase , top_k=2 )
self.assertEqual(nested_simplify(__lowercase , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def __magic_name__ ( self : int ) -> Tuple:
pass
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
a_ = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
a_ = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but also returning the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
a_ = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
def __magic_name__ ( self : List[Any] ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def __magic_name__ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : str=False ) -> int:
if return_pvalue:
SCREAMING_SNAKE_CASE__ : List[str] =pearsonr(__lowercase , __lowercase )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(__lowercase , __lowercase )[0] )}
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
A__ : Tuple = """time_series_transformer"""
A__ : List[Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "student_t" , __UpperCamelCase = "nll" , __UpperCamelCase = 1 , __UpperCamelCase = [1, 2, 3, 4, 5, 6, 7] , __UpperCamelCase = "mean" , __UpperCamelCase = 0 , __UpperCamelCase = 0 , __UpperCamelCase = 0 , __UpperCamelCase = 0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = 3_2 , __UpperCamelCase = 3_2 , __UpperCamelCase = 2 , __UpperCamelCase = 2 , __UpperCamelCase = 2 , __UpperCamelCase = 2 , __UpperCamelCase = True , __UpperCamelCase = "gelu" , __UpperCamelCase = 6_4 , __UpperCamelCase = 0.1 , __UpperCamelCase = 0.1 , __UpperCamelCase = 0.1 , __UpperCamelCase = 0.1 , __UpperCamelCase = 0.1 , __UpperCamelCase = 1_0_0 , __UpperCamelCase = 0.02 , __UpperCamelCase=True , **__UpperCamelCase , ):
"""simple docstring"""
UpperCamelCase_ = prediction_length
UpperCamelCase_ = context_length or prediction_length
UpperCamelCase_ = distribution_output
UpperCamelCase_ = loss
UpperCamelCase_ = input_size
UpperCamelCase_ = num_time_features
UpperCamelCase_ = lags_sequence
UpperCamelCase_ = scaling
UpperCamelCase_ = num_dynamic_real_features
UpperCamelCase_ = num_static_real_features
UpperCamelCase_ = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(__UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
UpperCamelCase_ = cardinality
else:
UpperCamelCase_ = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(__UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
UpperCamelCase_ = embedding_dimension
else:
UpperCamelCase_ = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCamelCase_ = num_parallel_samples
# Transformer architecture configuration
UpperCamelCase_ = input_size * len(__UpperCamelCase ) + self._number_of_features
UpperCamelCase_ = d_model
UpperCamelCase_ = encoder_attention_heads
UpperCamelCase_ = decoder_attention_heads
UpperCamelCase_ = encoder_ffn_dim
UpperCamelCase_ = decoder_ffn_dim
UpperCamelCase_ = encoder_layers
UpperCamelCase_ = decoder_layers
UpperCamelCase_ = dropout
UpperCamelCase_ = attention_dropout
UpperCamelCase_ = activation_dropout
UpperCamelCase_ = encoder_layerdrop
UpperCamelCase_ = decoder_layerdrop
UpperCamelCase_ = activation_function
UpperCamelCase_ = init_std
UpperCamelCase_ = use_cache
super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
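# Added illustration (a hedged sketch, not part of the original file): in the
# upstream `transformers` class, the sum `input_size * len(lags_sequence) +
# _number_of_features` is stored as `feature_size`; the class and attribute
# names below are that upstream API, assumed here. With one static categorical
# feature (cardinality 366, embedding dimension 2) and the defaults
# input_size=1, len(lags_sequence)=7:
from transformers import TimeSeriesTransformerConfig

_cfg = TimeSeriesTransformerConfig(
    prediction_length=24,
    num_static_categorical_features=1,
    cardinality=[366],
    embedding_dimension=[2],
)
# _number_of_features = sum(embedding_dimension) + 0 + 0 + 0 + input_size * 2 = 4
print(_cfg.feature_size)  # input_size * len(lags_sequence) + 4 = 1 * 7 + 4 = 11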
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def lowerCamelCase__ ( ) -> tuple[list[int], int]:
UpperCamelCase_ = [randint(-1000 , 1000 ) for i in range(10 )]
UpperCamelCase_ = randint(-5000 , 5000 )
return (arr, r)
_A = make_dataset()
def lowerCamelCase__ ( a__ : list[int] , a__ : int ) -> tuple[int, ...]:
for triplet in permutations(a__ , 3 ):
if sum(a__ ) == target:
return tuple(sorted(a__ ) )
return (0, 0, 0)
def lowerCamelCase__ ( a__ : list[int] , a__ : int ) -> tuple[int, int, int]:
arr.sort()
UpperCamelCase_ = len(a__ )
for i in range(n - 1 ):
UpperCamelCase_ , UpperCamelCase_ = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def lowerCamelCase__ ( ) -> tuple[float, float]:
UpperCamelCase_ = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
UpperCamelCase_ = """
triplet_sum1(*dataset)
"""
UpperCamelCase_ = """
triplet_sum2(*dataset)
"""
UpperCamelCase_ = repeat(setup=a__ , stmt=a__ , repeat=5 , number=1_0000 )
UpperCamelCase_ = repeat(setup=a__ , stmt=a__ , repeat=5 , number=1_0000 )
return (min(a__ ), min(a__ ))
if __name__ == "__main__":
from doctest import testmod
testmod()
_A = solution_times()
print(F'''The time for naive implementation is {times[0]}.''')
print(F'''The time for optimized implementation is {times[1]}.''')
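# Added sanity check (not in the original file): both implementations above
# agree on a small case; the sorted two-pointer version runs in O(n^2) instead
# of scanning O(n^3) permutations. `triplet_sum1`/`triplet_sum2` are the
# upstream names referenced by the timing setup string above (the defs here are
# obfuscated), so this is an assumption-labelled sketch:
#
# assert triplet_sum1([1, 2, 3, 4, 5], 9) == (1, 3, 5)
# assert triplet_sum2([1, 2, 3, 4, 5], 9) == (1, 3, 5)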
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
__a = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__a = True
except (ImportError, ModuleNotFoundError):
__a = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def __snake_case( _lowerCAmelCase ) -> str:
_lowerCAmelCase = re.sub("""<n>""" , """""" , _lowerCAmelCase ) # remove pegasus newline char (the substitution result was previously discarded)
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(_lowerCAmelCase ) )
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
lowercase__: List[str] = WavaVecaForSequenceClassification.from_pretrained(__UpperCAmelCase , config=__UpperCAmelCase )
lowercase__: str = downstream_dict['''projector.weight''']
lowercase__: Optional[int] = downstream_dict['''projector.bias''']
lowercase__: Dict = downstream_dict['''model.post_net.linear.weight''']
lowercase__: str = downstream_dict['''model.post_net.linear.bias''']
return model
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
lowercase__: Optional[int] = WavaVecaForAudioFrameClassification.from_pretrained(__UpperCAmelCase , config=__UpperCAmelCase )
lowercase__: str = downstream_dict['''model.linear.weight''']
lowercase__: int = downstream_dict['''model.linear.bias''']
return model
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
lowercase__: int = WavaVecaForXVector.from_pretrained(__UpperCAmelCase , config=__UpperCAmelCase )
lowercase__: str = downstream_dict['''connector.weight''']
lowercase__: List[str] = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
lowercase__: Optional[int] = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
lowercase__: List[str] = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
lowercase__: Optional[int] = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
lowercase__: Optional[int] = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
lowercase__: Dict = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
lowercase__: Union[str, Any] = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
lowercase__: str = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
lowercase__: str = torch.load(__UpperCAmelCase , map_location='''cpu''' )
lowercase__: str = checkpoint['''Downstream''']
lowercase__: int = WavaVecaConfig.from_pretrained(__UpperCAmelCase )
lowercase__: int = WavaVecaFeatureExtractor.from_pretrained(
__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , do_normalize=__UpperCAmelCase )
lowercase__: Union[str, Any] = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
lowercase__: Dict = convert_classification(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
elif arch.endswith('''ForAudioFrameClassification''' ):
lowercase__: Dict = convert_diarization(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
elif arch.endswith('''ForXVector''' ):
lowercase__: Dict = convert_xvector(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
lowercase__: Any = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(__UpperCAmelCase )
hf_model.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
__A = parser.parse_args()
convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
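# Example invocation (illustrative: the script filename and paths are
# placeholders; only the flags come from the argparse definition above):
#
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted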
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> bool:
return (
num != den and num % 1_0 == den // 1_0 and (num // 1_0) / (den % 1_0) == num / den
)
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> list[str]:
lowercase__: str = []
lowercase__: str = 1_1
lowercase__: str = int('''1''' + '''0''' * digit_len )
for num in range(__UpperCAmelCase , __UpperCAmelCase ):
while den <= 9_9:
if (num != den) and (num % 1_0 == den // 1_0) and (den % 1_0 != 0):
if is_digit_cancelling(__UpperCAmelCase , __UpperCAmelCase ):
solutions.append(F"""{num}/{den}""" )
den += 1
num += 1
lowercase__: Dict = 1_0
return solutions
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 2 ) -> int:
lowercase__: List[str] = 1.0
for fraction in fraction_list(__UpperCAmelCase ):
lowercase__: List[str] = Fraction(__UpperCAmelCase )
result *= frac.denominator / frac.numerator
return int(__UpperCAmelCase )
if __name__ == "__main__":
print(solution())
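# Added check (not in the original file): 49/98 is the classic digit-cancelling
# fraction, since naively "cancelling" the nines gives 4/8, which equals 49/98.
# `is_digit_cancelling` is the upstream name the loop above already references.
assert is_digit_cancelling(49, 98)
assert not is_digit_cancelling(12, 34)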
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict ) -> List[str]:
__lowerCAmelCase = name
__lowerCAmelCase = value
__lowerCAmelCase = weight
def __repr__( self : Union[str, Any] ) -> List[str]:
return f"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def lowercase ( self : int ) -> Optional[int]:
return self.value
def lowercase ( self : Optional[Any] ) -> Union[str, Any]:
return self.name
def lowercase ( self : List[Any] ) -> Tuple:
return self.weight
def lowercase ( self : int ) -> Dict:
return self.value / self.weight
def a_ ( lowerCAmelCase_ : Dict, lowerCAmelCase_ : int, lowerCAmelCase_ : Optional[int] ):
__lowerCAmelCase = []
for i in range(len(lowerCAmelCase_ ) ):
menu.append(Things(name[i], value[i], weight[i] ) )
return menu
def a_ ( lowerCAmelCase_ : Optional[int], lowerCAmelCase_ : List[Any], lowerCAmelCase_ : Optional[Any] ):
__lowerCAmelCase = sorted(lowerCAmelCase_, key=lowerCAmelCase_, reverse=lowerCAmelCase_ )
__lowerCAmelCase = []
__lowerCAmelCase , __lowerCAmelCase = 0.0, 0.0
for i in range(len(lowerCAmelCase_ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def a_ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
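# Added illustration (a self-contained sketch of the greedy fill above; the
# obfuscated helper names collide, so upstream names such as `build_menu` and
# `greedy` are not reproduced). Items are sorted by value density and taken
# while they fit the budget:
_items = [("Burger", 80, 40), ("Pizza", 100, 30), ("Cola", 60, 10)]  # (name, value, weight)
_taken, _total_value, _total_cost = [], 0, 0.0
for _name, _value, _weight in sorted(_items, key=lambda t: t[1] / t[2], reverse=True):
    if _total_cost + _weight <= 50:  # max_cost budget
        _taken.append(_name)
        _total_cost += _weight
        _total_value += _value
print(_taken, _total_value)  # ['Cola', 'Pizza'] 160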
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : List[str] = logging.get_logger(__name__)
_snake_case : Dict = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = """encodec"""
def __init__( self : Optional[Any] , lowerCAmelCase_ : List[Any]=[1.5, 3.0, 6.0, 12.0, 24.0] , lowerCAmelCase_ : Optional[Any]=2_4_0_0_0 , lowerCAmelCase_ : int=1 , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Optional[Any]=1_2_8 , lowerCAmelCase_ : List[Any]=3_2 , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : Optional[int]=[8, 5, 4, 2] , lowerCAmelCase_ : Optional[Any]="weight_norm" , lowerCAmelCase_ : Tuple=7 , lowerCAmelCase_ : Optional[int]=7 , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Dict="reflect" , lowerCAmelCase_ : List[Any]=2 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : Dict=1.0 , lowerCAmelCase_ : str=1_0_2_4 , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : List[Any]=True , **lowerCAmelCase_ : Optional[Any] , ) -> Union[str, Any]:
__lowerCAmelCase = target_bandwidths
__lowerCAmelCase = sampling_rate
__lowerCAmelCase = audio_channels
__lowerCAmelCase = normalize
__lowerCAmelCase = chunk_length_s
__lowerCAmelCase = overlap
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_filters
__lowerCAmelCase = num_residual_layers
__lowerCAmelCase = upsampling_ratios
__lowerCAmelCase = norm_type
__lowerCAmelCase = kernel_size
__lowerCAmelCase = last_kernel_size
__lowerCAmelCase = residual_kernel_size
__lowerCAmelCase = dilation_growth_rate
__lowerCAmelCase = use_causal_conv
__lowerCAmelCase = pad_mode
__lowerCAmelCase = compress
__lowerCAmelCase = num_lstm_layers
__lowerCAmelCase = trim_right_ratio
__lowerCAmelCase = codebook_size
__lowerCAmelCase = codebook_dim if codebook_dim is not None else hidden_size
__lowerCAmelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**lowerCAmelCase_ )
@property
def lowercase ( self : Union[str, Any] ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def lowercase ( self : Optional[Any] ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def lowercase ( self : Any ) -> int:
__lowerCAmelCase = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def lowercase ( self : str ) -> int:
return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0) )
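# Added illustration (hedged: `EncodecConfig` is the upstream `transformers`
# name assumed for the obfuscated class above). The hop length is the product
# of `upsampling_ratios`, so the frame-rate property with the 24 kHz defaults
# gives ceil(24000 / (8 * 5 * 4 * 2)) = ceil(24000 / 320) = 75 frames/s:
from transformers import EncodecConfig

print(EncodecConfig().frame_rate)  # 75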
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A__ : Union[str, Any] = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[str] = ["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = ["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Tuple = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
A__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
A__ : Optional[int] = logging.get_logger(__name__)
A__ : List[Any] = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
A__ : Tuple = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ) -> Tuple:
for attribute in key.split('.' ):
__lowerCamelCase : List[Any] = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
if weight_type is not None:
__lowerCamelCase : Any = getattr(UpperCAmelCase_ , UpperCAmelCase_ ).shape
else:
__lowerCamelCase : Any = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
__lowerCamelCase : Tuple = value
elif weight_type == "weight_g":
__lowerCamelCase : Optional[int] = value
elif weight_type == "weight_v":
__lowerCamelCase : str = value
elif weight_type == "bias":
__lowerCamelCase : List[Any] = value
else:
__lowerCamelCase : List[str] = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] ) -> Optional[int]:
__lowerCamelCase : Optional[Any] = []
__lowerCamelCase : Dict = fairseq_model.state_dict()
__lowerCamelCase : List[str] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
__lowerCamelCase : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , hf_model.config.feat_extract_norm == 'group' , )
__lowerCamelCase : str = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__lowerCamelCase : int = True
if "*" in mapped_key:
__lowerCamelCase : Optional[int] = name.split(UpperCAmelCase_ )[0].split('.' )[-2]
__lowerCamelCase : List[str] = mapped_key.replace('*' , UpperCAmelCase_ )
if "weight_g" in name:
__lowerCamelCase : Dict = 'weight_g'
elif "weight_v" in name:
__lowerCamelCase : Any = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
__lowerCamelCase : Any = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCamelCase : List[Any] = 'weight'
else:
__lowerCamelCase : str = None
set_recursively(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
continue
if not is_used:
unused_weights.append(UpperCAmelCase_ )
logger.warning(F'Unused weights: {unused_weights}' )
def UpperCAmelCase__ ( UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] ) -> Tuple:
__lowerCamelCase : List[str] = full_name.split('conv_layers.' )[-1]
__lowerCamelCase : List[Any] = name.split('.' )
__lowerCamelCase : Any = int(items[0] )
__lowerCamelCase : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
__lowerCamelCase : Union[str, Any] = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
__lowerCamelCase : Tuple = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'
" found."
)
__lowerCamelCase : str = value
logger.info(F'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
)
__lowerCamelCase : List[Any] = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(UpperCAmelCase_ )
@torch.no_grad()
def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any]=None ) -> Optional[int]:
# load the pre-trained checkpoints
__lowerCamelCase : Optional[Any] = torch.load(UpperCAmelCase_ )
__lowerCamelCase : Optional[Any] = WavLMConfigOrig(checkpoint['cfg'] )
__lowerCamelCase : Union[str, Any] = WavLMOrig(UpperCAmelCase_ )
model.load_state_dict(checkpoint['model'] )
model.eval()
if config_path is not None:
__lowerCamelCase : Optional[int] = WavLMConfig.from_pretrained(UpperCAmelCase_ )
else:
__lowerCamelCase : Any = WavLMConfig()
__lowerCamelCase : Optional[int] = WavLMModel(UpperCAmelCase_ )
recursively_load_weights(UpperCAmelCase_ , UpperCAmelCase_ )
hf_wavlm.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
A__ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
A__ : List[str] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
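# Example invocation (illustrative: the script filename and paths are
# placeholders; only the flags come from the argparse definition above):
#
#   python convert_wavlm_checkpoint.py \
#       --checkpoint_path ./WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base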
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self ) -> Tuple:
_a : Tuple = tempfile.mkdtemp()
_a : List[str] = 5
# Realm tok
_a : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_a : Optional[int] = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
_a : str = os.path.join(_a , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_a : List[str] = os.path.join(self.tmpdirname , '''realm_block_records''' )
os.makedirs(_a , exist_ok=_a )
def __lowercase ( self ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
def __lowercase ( self ) -> str:
shutil.rmtree(self.tmpdirname )
def __lowercase ( self ) -> List[str]:
_a : int = RealmConfig(num_block_records=self.num_block_records )
return config
def __lowercase ( self ) -> Optional[int]:
_a : Optional[Any] = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
def __lowercase ( self ) -> Dict:
_a : Any = np.array(
[
b'''This is the first record''',
b'''This is the second record''',
b'''This is the third record''',
b'''This is the fourth record''',
b'''This is the fifth record''',
b'''This is a longer longer longer record''',
] , dtype=_a , )
return block_records
def __lowercase ( self ) -> Optional[int]:
_a : List[str] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def __lowercase ( self ) -> Optional[int]:
_a : Dict = self.get_config()
_a : Tuple = self.get_dummy_retriever()
_a : Dict = retriever.tokenizer
_a : Union[str, Any] = np.array([0, 3] , dtype='''long''' )
_a : Any = tokenizer(['''Test question'''] ).input_ids
_a : int = tokenizer(
['''the fourth'''] , add_special_tokens=_a , return_token_type_ids=_a , return_attention_mask=_a , ).input_ids
_a : str = config.reader_seq_len
_a , _a , _a , _a = retriever(
_a , _a , answer_ids=_a , max_length=_a , return_tensors='''np''' )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 1_0) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 1_0) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
def __lowercase ( self ) -> Union[str, Any]:
_a : Optional[int] = self.get_config()
_a : Union[str, Any] = self.get_dummy_retriever()
_a : Tuple = retriever.tokenizer
_a : List[str] = np.array([0, 3, 5] , dtype='''long''' )
_a : int = tokenizer(['''Test question'''] ).input_ids
_a : List[str] = tokenizer(
['''the fourth''', '''longer longer'''] , add_special_tokens=_a , return_token_type_ids=_a , return_attention_mask=_a , ).input_ids
_a : Union[str, Any] = config.reader_seq_len
_a , _a , _a , _a = retriever(
_a , _a , answer_ids=_a , max_length=_a , return_tensors='''np''' )
self.assertEqual([False, True, True] , _a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , _a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , _a )
def __lowercase ( self ) -> int:
_a : Any = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
# Test local path
_a : str = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
# Test mocked remote path
with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
_a : Tuple = os.path.join(
os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
_a : str = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
from __future__ import annotations
def __UpperCAmelCase ( __a : list ) -> float:
"""simple docstring"""
if not nums:
raise ValueError('''List is empty''' )
return sum(__a ) / len(__a )
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
_lowerCamelCase : Tuple = get_tests_dir('fixtures')
class lowercase ( unittest.TestCase):
def a_ ( self : int ):
"""simple docstring"""
A_ : str = mock.Mock()
A_ : Tuple = 5_00
A_ : str = {}
A_ : Union[str, Any] = HTTPError
A_ : Union[str, Any] = {}
# Download this model to make sure it's in the cache.
A_ : List[Any] = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=A_ ) as mock_head:
A_ : int = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# This check we did call the fake head request
mock_head.assert_called()
def a_ ( self : int ):
"""simple docstring"""
A_ : Optional[int] = ViTImageProcessor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
def a_ ( self : List[Any] ):
"""simple docstring"""
with self.assertRaises(A_ ):
# config is in subfolder, the following should not work without specifying the subfolder
A_ : List[str] = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
A_ : Any = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' )
self.assertIsNotNone(A_ )
@is_staging_test
class lowercase ( unittest.TestCase):
@classmethod
def a_ ( cls : Any ):
"""simple docstring"""
A_ : Tuple = TOKEN
HfFolder.save_token(A_ )
@classmethod
def a_ ( cls : List[Any] ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
def a_ ( self : int ):
"""simple docstring"""
A_ : List[Any] = ViTImageProcessor.from_pretrained(A_ )
image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token )
A_ : Union[str, Any] = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A_ , getattr(A_ , A_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A_ , repo_id='''test-image-processor''' , push_to_hub=A_ , use_auth_token=self._token )
A_ : List[str] = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A_ , getattr(A_ , A_ ) )
def a_ ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Optional[int] = ViTImageProcessor.from_pretrained(A_ )
image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token )
A_ : List[str] = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A_ , getattr(A_ , A_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A_ , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=A_ , use_auth_token=self._token )
A_ : str = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A_ , getattr(A_ , A_ ) )
def a_ ( self : List[Any] ):
"""simple docstring"""
CustomImageProcessor.register_for_auto_class()
A_ : Any = CustomImageProcessor.from_pretrained(A_ )
image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , )
A_ : Any = AutoImageProcessor.from_pretrained(
F"""{USER}/test-dynamic-image-processor""" , trust_remote_code=A_ )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
from __future__ import annotations
import os
from collections.abc import Mapping
_UpperCAmelCase : Tuple = tuple[int, int]
class lowercase :
def __init__( self , A_ , A_ ) -> None:
"""simple docstring"""
UpperCamelCase = vertices
UpperCamelCase = {
(min(A_ ), max(A_ )): weight for edge, weight in edges.items()
}
def __UpperCamelCase ( self , A_ , A_ ) -> None:
"""simple docstring"""
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
UpperCamelCase = weight
def __UpperCamelCase ( self ) -> Graph:
"""simple docstring"""
UpperCamelCase = Graph({min(self.vertices )} , {} )
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
while len(subgraph.vertices ) < len(self.vertices ):
UpperCamelCase = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
UpperCamelCase = edge
UpperCamelCase = weight
subgraph.add_edge(A_ , A_ )
return subgraph
def A ( lowercase = "p107_network.txt" ) -> int:
'''simple docstring'''
UpperCamelCase = os.path.abspath(os.path.dirname(lowercase ) )
UpperCamelCase = os.path.join(lowercase , lowercase )
UpperCamelCase = {}
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
with open(lowercase ) as f:
UpperCamelCase = f.read().strip().split('\n' )
UpperCamelCase = [line.split(',' ) for line in data]
for edgea in range(1 , len(lowercase ) ):
for edgeb in range(edgea ):
if adjacency_matrix[edgea][edgeb] != "-":
UpperCamelCase = int(adjacency_matrix[edgea][edgeb] )
UpperCamelCase = Graph(set(range(len(lowercase ) ) ) , lowercase )
UpperCamelCase = graph.prims_algorithm()
UpperCamelCase = sum(graph.edges.values() )
UpperCamelCase = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F'''{solution() = }''')
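# Added check (an assumption-labelled sketch: `Graph` and `prims_algorithm` are
# the upstream names that the code above already references). A three-vertex
# graph whose minimum spanning tree keeps the two cheap edges:
_g = Graph({1, 2, 3}, {(1, 2): 1, (2, 3): 2, (1, 3): 10})
_mst = _g.prims_algorithm()
print(sum(_mst.edges.values()))  # 1 + 2 = 3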
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : List[str] = {
"configuration_x_clip": [
"XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XCLIPConfig",
"XCLIPTextConfig",
"XCLIPVisionConfig",
],
"processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[Any] = [
"XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"XCLIPModel",
"XCLIPPreTrainedModel",
"XCLIPTextModel",
"XCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
__A : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if a < 0:
raise ValueError('''Input value must be a positive integer''' )
elif isinstance(_SCREAMING_SNAKE_CASE , float ):
raise TypeError('''Input value must be a \'int\' type''' )
return bin(_SCREAMING_SNAKE_CASE ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
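# Added check (not in the original file): 25 = 0b11001 has three set bits; the
# function above is obfuscated as `lowercase`:
assert lowercase(25) == 3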
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase_ = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowercase_ = (
{
"""feature-extraction""": TFMobileBertModel,
"""fill-mask""": TFMobileBertForMaskedLM,
"""question-answering""": TFMobileBertForQuestionAnswering,
"""text-classification""": TFMobileBertForSequenceClassification,
"""token-classification""": TFMobileBertForTokenClassification,
"""zero-shot""": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase_ = False
lowercase_ = False
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int]=False) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: List[str] =super()._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase)
if return_labels:
if model_class in get_values(__lowercase):
lowerCamelCase__: Union[str, Any] =tf.zeros(self.model_tester.batch_size , dtype=tf.intaa)
return inputs_dict
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__(self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any=13 , UpperCAmelCase_ : int=7 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Tuple=99 , UpperCAmelCase_ : List[str]=32 , UpperCAmelCase_ : int=32 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Dict=37 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Optional[int]=512 , UpperCAmelCase_ : Union[str, Any]=16 , UpperCAmelCase_ : List[Any]=2 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : Any=None , ) ->Any:
'''simple docstring'''
lowerCamelCase__: List[Any] =parent
lowerCamelCase__: Any =batch_size
lowerCamelCase__: Dict =seq_length
lowerCamelCase__: List[str] =is_training
lowerCamelCase__: List[Any] =use_input_mask
lowerCamelCase__: Optional[int] =use_token_type_ids
lowerCamelCase__: Any =use_labels
lowerCamelCase__: List[str] =vocab_size
lowerCamelCase__: Any =hidden_size
lowerCamelCase__: Optional[int] =num_hidden_layers
lowerCamelCase__: Union[str, Any] =num_attention_heads
lowerCamelCase__: Dict =intermediate_size
lowerCamelCase__: str =hidden_act
lowerCamelCase__: List[Any] =hidden_dropout_prob
lowerCamelCase__: Union[str, Any] =attention_probs_dropout_prob
lowerCamelCase__: List[str] =max_position_embeddings
lowerCamelCase__: List[str] =type_vocab_size
lowerCamelCase__: Dict =type_sequence_label_size
lowerCamelCase__: str =initializer_range
lowerCamelCase__: int =num_labels
lowerCamelCase__: int =num_choices
lowerCamelCase__: List[str] =scope
lowerCamelCase__: Any =embedding_size
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->str:
'''simple docstring'''
lowerCamelCase__: str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowerCamelCase__: str =None
if self.use_input_mask:
lowerCamelCase__: List[str] =random_attention_mask([self.batch_size, self.seq_length])
lowerCamelCase__: Union[str, Any] =None
if self.use_token_type_ids:
lowerCamelCase__: Tuple =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowerCamelCase__: int =None
lowerCamelCase__: str =None
lowerCamelCase__: Any =None
if self.use_labels:
lowerCamelCase__: Optional[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowerCamelCase__: Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowerCamelCase__: Optional[Any] =ids_tensor([self.batch_size] , self.num_choices)
lowerCamelCase__: str =MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str]) ->str:
'''simple docstring'''
lowerCamelCase__: Dict =TFMobileBertModel(config=__lowercase)
lowerCamelCase__: List[Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase__: Dict =model(__lowercase)
lowerCamelCase__: Union[str, Any] =[input_ids, input_mask]
lowerCamelCase__: Optional[Any] =model(__lowercase)
lowerCamelCase__: str =model(__lowercase)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple) ->Dict:
'''simple docstring'''
lowerCamelCase__: int =TFMobileBertForMaskedLM(config=__lowercase)
lowerCamelCase__: Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase__: Union[str, Any] =model(__lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: int =TFMobileBertForNextSentencePrediction(config=__lowercase)
lowerCamelCase__: Any ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase__: Optional[int] =model(__lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: List[str] =TFMobileBertForPreTraining(config=__lowercase)
lowerCamelCase__: int ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase__: Optional[Any] =model(__lowercase)
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
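

# Illustrative sketch: running the integration check above outside the unittest
# harness (assumes the imports at the top of this file; downloads the real
# checkpoint, so it is slow):
if __name__ == "__main__":
    _model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
    _output = _model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
    print(_output.shape)  # expected: (1, 6, 30522)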
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
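

# Illustrative, hypothetical quick-start mirroring the slow test above (heavy:
# downloads the real checkpoint and needs a CUDA GPU; not executed here):
#   pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b").to("cuda")
#   frames = pipe("Spiderman is surfing", num_inference_steps=25, output_type="np").frames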
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Compute the Jaccard similarity of two sets (or lists/tuples treated as multisets)."""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
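    # Illustrative follow-up: with `alternative_union=True` the denominator becomes
    # |A| + |B| = 11 instead of |A ∪ B| = 8, so the score drops from 0.375 to 3/11.
    print(jaccard_similarity(set_a, set_b, alternative_union=True))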
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]
class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, secondary_filename, parser_only
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")
    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
            " " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
            " " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
            " " * 20 + "\"epoch\": epoch,\n\n",
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Flip two qubits with X gates, measure them, and return the result histogram."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
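    # Sanity note: both qubits are deterministically flipped to |1> before the
    # measurement, so the printed histogram should read {'11': 1000} on every run.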
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits
    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Return the static shape of a tensor where known, falling back to the dynamic shape."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    """Wrapper around `tf.nn.softmax` that adds a tiny shift to the logits as a numerical workaround."""
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    """Simplified functional layer norm, mirroring `torch.nn.functional.layer_norm` for ported models."""
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    """Replicate the behavior of `torch.flatten` in TF."""
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    """Invert an attention mask (e.g., switch 0. and 1.) and broadcast it to 4D."""
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    """Raise if any id in `tensor` is out of range for an embedding matrix with `embed_dim` rows."""
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data):
    """Save attributes (`data`) under `name` in the HDF5 group, chunking if they exceed the header limit."""
    # HDF5 places a hard limit on the size of an object header, so large
    # attribute lists have to be split across several chunked attributes.
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    """Load attributes saved under `name` from the HDF5 group, rejoining chunks if needed."""
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data
def expand_1d(data):
    """Expand 1-dimensional `tf.Tensor`s in a nested structure by adding a trailing axis of size 1."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
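

# Illustrative behavior of the helpers above (evaluated with TF eager execution;
# shown as comments because this module uses package-relative imports):
#   x = tf.zeros((2, 3, 4))
#   shape_list(x)                       -> [2, 3, 4]
#   flatten(x, start_dim=1, end_dim=2)  -> shape (2, 12)
#   expand_1d(tf.zeros((5,)))           -> shape (5, 1)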
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)
    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
# Size of the alphabet used when hashing strings
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using Rabin-Karp rolling hashes."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_1) and not rabin_karp(pattern, text_2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
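

# Worked example of the rolling-hash update used in `rabin_karp` above, for a
# window of length 2 over the text "abc" (alphabet_size = 256, before the modulus):
#   hash("ab") = ord("a") * 256 + ord("b") = 97 * 256 + 98 = 24930
#   hash("bc") = (hash("ab") - ord("a") * 256) * 256 + ord("c")
#              = (24930 - 24832) * 256 + 99 = 25187 = ord("b") * 256 + ord("c")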
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )

        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
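

# Illustrative usage from user code (not from inside this module, which uses
# package-relative imports); the attribute values below follow from `attribute_map`:
#   from transformers import GPTBigCodeConfig
#   config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)
#   config.hidden_size  # -> 128, resolved to `n_embd` via `attribute_map`
#   config.multi_query  # -> True (multi-query attention is on by default)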
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
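
# Note: with the `_LazyModule` registration above, `XCLIPModel` and friends are
# resolved on first attribute access, so importing this package stays cheap until
# the torch-backed symbols are actually used.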
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextModel,
    CLIPTokenizer,
    WhisperForConditionalGeneration,
    WhisperProcessor,
)

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
def __call__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=1_60_00 , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , **SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase__ = self.speech_processor.feature_extractor(
SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , sampling_rate=SCREAMING_SNAKE_CASE_ ).input_features.to(self.device )
UpperCamelCase__ = self.speech_model.generate(SCREAMING_SNAKE_CASE_ , max_length=48_00_00 )
UpperCamelCase__ = self.speech_processor.tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ , normalize=SCREAMING_SNAKE_CASE_ )[
0
]
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = 1
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt")
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}.")
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`.")
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device)
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
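# A self-contained sketch of the guidance step used in the denoising loop above:
# classifier-free guidance mixes the unconditional and text-conditioned noise
# predictions. The tensor shapes below are illustrative assumptions, not the
# pipeline's real ones.
#
# import torch
#
# def apply_cfg(noise_pred_uncond, noise_pred_text, guidance_scale):
#     # guidance_scale == 1.0 returns noise_pred_text unchanged, i.e. no guidance
#     return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
#
# guided = apply_cfg(torch.zeros(1, 4, 64, 64), torch.ones(1, 4, 64, 64), 7.5)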
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """simple docstring"""

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> Counter:
    """simple docstring"""
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
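# A quick usage sketch of the random walk above (the node names and transition
# probabilities are made up for illustration):
# transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
# visited = get_transitions("a", transitions, 1000)
# visited.most_common()  # e.g. [("a", 8xx), ("b", 1xx)] -- counts are stochastic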
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework, the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the README.md file at https://unbabel.github.io/COMET/html/models.html for more information.
'''

_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
"""simple docstring"""
snake_case__ : str = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
snake_case__ : Optional[Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
snake_case__ : Any = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
snake_case__ : Optional[Any] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
snake_case__ : int = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
snake_case__ : Union[str, Any] = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
snake_case__ : List[Any] = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
snake_case__ : Optional[int] = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: Type[Formatter],
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
) -> None:
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})")
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})")
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
) -> None:
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None)}, but got '{format_type}'")
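# Usage sketch for the registry above (assumes numpy is importable, so the
# "np"/"numpy" registration succeeded at import time):
# get_formatter("np")     # alias resolves to "numpy" -> NumpyFormatter()
# get_formatter("torch")  # returns TorchFormatter() or raises the stored
#                         # _torch_error if PyTorch is not installed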
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"})
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."})
    verbose_logging: Optional[bool] = field(
        default=False, metadata={"help": "Whether to log verbose messages or not."})
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."})
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."})
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."})
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments) -> None:
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)])
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
    train_split_name: Optional[str] = field(
        default="train", metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        })
    validation_split_name: Optional[str] = field(
        default="validation", metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        })
    speech_file_column: Optional[str] = field(
        default="file", metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"})
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."})
    validation_split_percentage: Optional[int] = field(
        default=1, metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        })
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."})
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"})
@dataclass
class DataCollatorForWavaVecaPretraining:
    '''simple docstring'''

    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
        """simple docstring"""
        batch = self.feature_extractor.pad(
            features, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt")
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]
        attention_mask = None
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long)
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=attention_mask, min_masks=2)
        return batch
class WavaVecaPreTrainer(Trainer):
    '''simple docstring'''
    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """simple docstring"""
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir)
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir)
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir)
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}", cache_dir=model_args.cache_dir)
    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True)

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names)
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate))

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, remove_columns=vectorized_datasets["train"].column_names)
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing)
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'")
    model = WavaVecaForPreTraining(config)
    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)
    trainer = WavaVecaPreTrainer(
        model=model, data_collator=data_collator, args=training_args, train_dataset=vectorized_datasets["train"], eval_dataset=vectorized_datasets["validation"], tokenizer=feature_extractor, max_gumbel_temp=model_args.max_gumbel_temperature, min_gumbel_temp=model_args.min_gumbel_temperature, gumbel_temp_decay=model_args.gumbel_temperature_decay)
    trainer.train()
if __name__ == "__main__":
main()
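# Shown in isolation: the gumbel-softmax temperature schedule applied inside
# WavaVecaPreTrainer.training_step above. A minimal sketch; the defaults mirror
# the ModelArguments defaults (2.0, 0.5, 0.999995).
def gumbel_temperature(step: int, max_temp: float = 2.0, min_temp: float = 0.5, decay: float = 0.999995) -> float:
    # geometric decay from max_temp, clipped from below at min_temp
    return max(max_temp * decay**step, min_temp)
# e.g. gumbel_temperature(0) == 2.0, and by step 100_000 the temperature has
# decayed to roughly 2.0 * exp(-0.5) ~= 1.21.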
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    '''simple docstring'''
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    if tensor.shape == tf.TensorShape(None):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    '''simple docstring'''
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    '''simple docstring'''
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon)
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    '''simple docstring'''
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    '''simple docstring'''
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data):
    '''simple docstring'''
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}")
    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    '''simple docstring'''
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]])
            chunk_id += 1
    return data


def expand_1d(data):
    '''simple docstring'''
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t
    return tf.nest.map_structure(_expand_single_1d_tensor, data)
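# Minimal usage sketch for the helpers above (shapes are illustrative):
# x = tf.zeros((2, 3, 4))
# shape_list(x)   -> [2, 3, 4]
# flatten(x, 1)   -> a tensor of shape (2, 12), mirroring torch.flatten(x, 1)
# stable_softmax(tf.zeros((2, 3)), axis=-1)  -> uniform rows of 1/3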
'''simple docstring'''
def binary_or(a: int, b: int) -> str:
    '''simple docstring'''
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
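# Worked examples for binary_or above:
# binary_or(5, 3) -> "0b111"   (101 | 011)
# binary_or(8, 2) -> "0b1010"  (1000 | 0010)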
def solution(length: int = 50) -> int:
    # Counts tilings of a row of `length` unit squares using tiles of length
    # 2, 3 and 4, accumulating the ways per tile length.
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])
if __name__ == "__main__":
print(F"""{solution() = }""")
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}


class SpeechTaTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        '''simple docstring'''
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        '''simple docstring'''
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        '''simple docstring'''
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
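# Hypothetical usage of the tokenizer above (the checkpoint name is one of the
# real entries in PRETRAINED_VOCAB_FILES_MAP; loading it requires network access):
# tok = SpeechTaTokenizer.from_pretrained("microsoft/speecht5_tts")
# ids = tok("hello world").input_ids  # char-level pieces followed by the eos token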
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class TaConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if (len(act_info) > 1 and act_info[0] != "gated") or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'")
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs)
class TaOnnxConfig(OnnxSeqaSeqConfigWithPast):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
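# How the activation string is parsed by TaConfig.__init__ above:
# "relu"       -> dense_act_fn="relu",     is_gated_act=False
# "gated-silu" -> dense_act_fn="silu",     is_gated_act=True
# "gated-gelu" -> dense_act_fn="gelu_new"  (backwards-compatibility remap)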
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"""python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
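# A rough sketch of what _LazyModule does (simplified; the real implementation
# lives in transformers.utils): attribute access triggers the submodule import.
# import importlib, types
#
# class LazyModule(types.ModuleType):
#     def __init__(self, name, import_structure):
#         super().__init__(name)
#         self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
#     def __getattr__(self, attr):
#         module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
#         return getattr(module, attr)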
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: int , UpperCamelCase: int , UpperCamelCase: Union[str, Any]=13 , UpperCamelCase: List[Any]=7 , UpperCamelCase: Any=True , UpperCamelCase: Optional[Any]=True , UpperCamelCase: Optional[Any]=True , UpperCamelCase: str=True , UpperCamelCase: Optional[int]=99 , UpperCamelCase: Optional[Any]=32 , UpperCamelCase: Tuple=5 , UpperCamelCase: Optional[int]=4 , UpperCamelCase: int=37 , UpperCamelCase: str="gelu" , UpperCamelCase: Optional[Any]=0.1 , UpperCamelCase: List[Any]=0.1 , UpperCamelCase: Tuple=5_12 , UpperCamelCase: List[str]=16 , UpperCamelCase: List[str]=2 , UpperCamelCase: List[Any]=0.02 , UpperCamelCase: List[str]=False , UpperCamelCase: int=True , UpperCamelCase: Union[str, Any]="None" , UpperCamelCase: Optional[int]=3 , UpperCamelCase: List[str]=4 , UpperCamelCase: List[str]=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = relative_attention
A__ = position_biased_input
A__ = pos_att_type
A__ = scope
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ = ids_tensor([self.batch_size] , self.num_choices )
A__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self: str ):
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def get_pipeline_config(self):
        """simple docstring"""
        config = self.get_config()
        config.vocab_size = 300
        return config
def UpperCamelCase ( self: List[Any] , UpperCamelCase: str ):
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCamelCase ( self: Tuple , UpperCamelCase: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: Optional[Any] , UpperCamelCase: Tuple ):
"""simple docstring"""
A__ = DebertaModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase )[0]
A__ = model(UpperCamelCase , token_type_ids=UpperCamelCase )[0]
A__ = model(UpperCamelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCamelCase ( self: List[str] , UpperCamelCase: Optional[int] , UpperCamelCase: Optional[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Tuple , UpperCamelCase: Tuple , UpperCamelCase: str , UpperCamelCase: Any ):
"""simple docstring"""
A__ = DebertaForMaskedLM(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: int , UpperCamelCase: Dict , UpperCamelCase: Dict , UpperCamelCase: Tuple , UpperCamelCase: str ):
"""simple docstring"""
A__ = self.num_labels
A__ = DebertaForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(UpperCamelCase )
def UpperCamelCase ( self: Optional[int] , UpperCamelCase: List[Any] , UpperCamelCase: Any , UpperCamelCase: Dict , UpperCamelCase: int , UpperCamelCase: Optional[Any] , UpperCamelCase: str , UpperCamelCase: int ):
"""simple docstring"""
A__ = self.num_labels
A__ = DebertaForTokenClassification(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self: Tuple , UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: Tuple , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: Tuple , UpperCamelCase: Any ):
"""simple docstring"""
A__ = DebertaForQuestionAnswering(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class a ( _lowerCamelCase, _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": DebertaModel,
"fill-mask": DebertaForMaskedLM,
"question-answering": DebertaForQuestionAnswering,
"text-classification": DebertaForSequenceClassification,
"token-classification": DebertaForTokenClassification,
"zero-shot": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = DebertaModelTester(self )
A__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 )
def UpperCamelCase ( self: int ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*UpperCamelCase )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*UpperCamelCase )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*UpperCamelCase )
@slow
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = DebertaModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
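# --------------------------------------------------------------------------------------
# Editor's usage sketch (an illustrative addition, not part of the original test file;
# it relies only on the public transformers API and needs network access to the Hub):
if __name__ == "__main__":
    import torch
    from transformers import DebertaTokenizer

    tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
    deberta = DebertaModel.from_pretrained("microsoft/deberta-base")
    encoded = tokenizer("DeBERTa uses disentangled attention.", return_tensors="pt")
    with torch.no_grad():
        hidden_states = deberta(**encoded)[0]
    print(hidden_states.shape)  # (1, sequence_length, 768) for deberta-base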
| 69
| 0
|
def z_function(input_str):
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


def go_next(i, z_result, s):
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern, input_str):
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
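    # Editor's usage sketch (illustrative additions, not in the original file): the
    # Z-array stores, per index, the longest common prefix with the whole string.
    assert z_function("abracadabra") == [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
    # "ab" occurs twice in "abracadabra" ("ABracadABra"):
    assert find_pattern("ab", "abracadabra") == 2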
| 178
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
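# Editor's note (an illustrative addition, not part of the original __init__): with the
# lazy structure above, importing the package is cheap; heavy submodules load only when
# a class is first accessed, e.g.
#     from transformers.models.tapas import TapasConfig  # only configuration code loads
#     from transformers.models.tapas import TapasModel   # now modeling_tapas (and torch) load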
| 178
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyVaaPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaPriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, )
        return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0, )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )
| 289
|
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.")
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")
class PhrasalConstraint(Constraint):
    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class DisjunctiveTrie:
    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}.")
        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.")
        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if len(token_list) == 0:
            return None
        else:
            return token_list
    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to the completed list and set
                #    inprogress to None. If there are no pending constraints either, then this full list of constraints
                #    is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true.")
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped
    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never mutate self.constraints objects
        # throughout this process, so it stays at its initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
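# --------------------------------------------------------------------------------------
# Editor's usage sketch (an illustrative addition, not part of the original module):
# stepping a PhrasalConstraint through ConstraintListState token by token, the way a
# constrained beam search would during generation. The token ids are arbitrary examples.
if __name__ == "__main__":
    state = ConstraintListState([PhrasalConstraint(token_ids=[5, 9, 1])])
    for token in [5, 9, 1]:
        complete, stepped = state.add(token)
        print(f"token={token} stepped={stepped} all constraints complete={complete}")
    assert state.completed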
| 289
| 1
|
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude, angle, radian_mode=False) -> list[float]:
    """Resolve a force of given magnitude and direction into x and y components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces, location, eps=10**-1) -> bool:
    """Check whether the net moment of the force system is (approximately) zero."""
    # summation of moments must be zero
    moments = cross(location, forces)
    sum_moments = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
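    # Editor's sketch (an illustrative addition, not in the original file): a single
    # upward force applied away from the pivot leaves a net moment of 5 * 100 = 500,
    # so the equilibrium check fails.
    forces = array([[0, 100]])
    location = array([[5, 0]])
    assert not in_static_equilibrium(forces, location)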
| 56
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True, )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
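    # Editor's note (an illustrative addition, not part of the original script): the
    # conversion can also be invoked programmatically; the URL below is the script's
    # default checkpoint and the output directory is hypothetical, e.g.
    #     convert_dpt_checkpoint(
    #         checkpoint_url="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
    #         pytorch_dump_folder_path="./dpt-large-converted",
    #         push_to_hub=False,
    #         model_name="dpt-large",
    #     )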
| 56
| 1
|
'''simple docstring'''
def combination_util(arr, n, r, index, data, i):
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
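# Editor's note (an illustrative addition, not in the original file): the recursion above
# enumerates the same r-combinations as the standard library, in the same lexicographic
# order, e.g.
#     import itertools
#     len(list(itertools.combinations([10, 20, 30, 40, 50], 3)))  # == 10, one per printed line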
| 369
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True, )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        tokenizer.padding_side = "left"
        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)
        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 17
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__( self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs, ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs, ):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
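# --------------------------------------------------------------------------------------
# Editor's usage sketch (an illustrative addition, not part of the original module;
# it needs network access to download the tokenizer files from the Hub):
if __name__ == "__main__":
    tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
    batch = tok("UN Chief Says There Is No Plan to Stop War", return_tensors="pt")
    # source sequences are suffixed with [eos, src_lang_code], per set_src_lang_special_tokens above
    print(tok.convert_ids_to_tokens(batch["input_ids"][0].tolist()[-2:]))  # ['</s>', 'en_XX']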
| 312
|
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2", "stage3"], out_indices=[1, 2, 3], ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
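
    # Worked example for the tester defaults above: image_size=32 and
    # patch_size=2 give (32 // 2) ** 2 = 256 patches; with depths=[1, 2, 1] the
    # sequence is downsampled by 4 ** 2 = 16, so expected_seq_len = 256 // 16
    # = 16 and expected_dim = embed_dim * 2 ** 2 = 16 * 4 = 64.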
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])
        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        ))
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip("""Swin does not use inputs_embeds""" )
def _a (self ):
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def _a (self ):
"""simple docstring"""
pass
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[int] = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[int] = model_class(_lowerCamelCase )
UpperCAmelCase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Any = [*signature.parameters.keys()]
UpperCAmelCase__ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def _a (self ):
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def _a (self ):
"""simple docstring"""
pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5), msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ), )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    # Overwritten because the returned hidden states are tuples of tensors instead of a single tensor
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 171
| 0
|
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark every test that is not explicitly "integration" or "unit" as a unit test
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when features are not yet compatible with SQLAlchemy 2.0
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 358
|
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
    def get_config(self):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model(
        self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size))  # seq length * 3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use input ids
    test_generate_without_input_ids = False

    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
@slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device)
        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)
        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)
            with torch.no_grad():
                state_preds, action_preds, return_preds = model(
                    states=states, actions=actions, rewards=rewards, returns_to_go=returns_to_go, timesteps=timesteps, attention_mask=attention_mask, return_dict=False, )
            self.assertEqual(action_preds.shape, actions.shape)
            self.assertTrue(torch.allclose(action_preds[0, -1], expected_outputs[step], atol=1e-4))
            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )
            action = action_preds[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1)
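
    # Note on the loop above: returns_to_go is decremented by each observed
    # reward (pred_return = returns_to_go[0, -1] - reward), which is the
    # standard return-to-go conditioning used by Decision Transformers.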
| 298
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
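

# Minimal usage sketch (assumes the usual transformers import layout):
# from transformers import LiltConfig, LiltModel
# configuration = LiltConfig()
# model = LiltModel(configuration)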
| 80
|
"""simple docstring"""
def greatest_common_divisor(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if greatest_common_divisor(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    # Extended Euclidean algorithm: track (u1, u2, u3) and (v1, v2, v3) so that
    # u1 * a + u2 * m == u3 holds throughout the loop.
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
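

if __name__ == "__main__":
    # Small sanity check of the helpers above: 7 * 15 = 105 = 4 * 26 + 1,
    # so 15 is the inverse of 7 modulo 26.
    assert greatest_common_divisor(7, 26) == 1
    assert find_mod_inverse(7, 26) == 15
    assert (7 * find_mod_inverse(7, 26)) % 26 == 1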
| 69
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
    required_optional_params = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy")
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()
        output = pipeline(
            prompt, image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np", )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
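
# Rough shape of the two-stage flow exercised above (a summary, not extra
# API): the prior maps the text prompt to CLIP image embeddings, and the
# img2img pipeline then denoises the init image toward those embeddings;
# `strength` controls how far the result may drift from the original image.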
| 351
|
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """
    Compute the next generation of the Game of Life grid.
    """
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
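
# Sanity sketch: BLINKER oscillates with period 2, so applying new_generation
# twice returns the original pattern (uncomment to check):
# assert new_generation(new_generation(BLINKER)) == BLINKER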
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """
    Generate a list of images of subsequent Game of Life generations.
    """
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 62
| 0
|
"""simple docstring"""
def binary_insertion_sort(collection: list) -> list:
    """
    Sort a list in ascending order, using binary search to locate the
    insertion point for each element.
    """
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
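
# Example: binary_insertion_sort([5, 2, 4, 1, 3]) returns [1, 2, 3, 4, 5].
# The binary search finds each insertion index in O(log i); the element shift
# keeps the overall worst case at O(n^2), like ordinary insertion sort.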
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 289
|
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
UpperCAmelCase__ = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)
        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)
        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()
        noise = torch.randn(
            1, model_accelerate.config.in_channels, model_accelerate.config.sample_size, model_accelerate.config.sample_size, generator=torch.manual_seed(0), )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)
        arr_accelerate = model_accelerate(noise, time_step)["sample"]
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False)
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]
        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)
        noise = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0), )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)
        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)
        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)
        batch_size = 4
        num_channels = 3
        sizes = (256, 256)
        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
| 289
| 1
|
'''simple docstring'''
import string
def decrypt(message: str) -> None:
    """
    Brute-force every Caesar key and print each candidate plaintext.
    """
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    encrypted_message = input("Encrypted message: ")
    message = encrypted_message.upper()
    decrypt(message)
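
# Example: decrypt("GUVF VF ZL FRPERG ZRFFNTR") prints one candidate per key;
# the key 13 (ROT13) line reads "THIS IS MY SECRET MESSAGE".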
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 89
|
'''simple docstring'''
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(
    img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int
) -> np.ndarray:
    """
    Return the image warped by the affine transform that maps the three
    points in `pt1` onto the three points in `pt2`.
    """
    rotation_matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, rotation_matrix, (rows, cols))
if __name__ == "__main__":
# read original image
__A : Dict = cva.imread(
str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
)
# turn image in gray scale value
__A : List[Any] = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
__A , __A : Dict = gray_img.shape
# set different points to rotate image
__A : List[str] = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
__A : Tuple = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
__A : List[Any] = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
__A : Optional[Any] = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
__A : Optional[Any] = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
__A : Dict = plt.figure(1)
__A : Optional[Any] = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
plt.title(titles[i])
plt.axis("off")
plt.subplots_adjust(left=0.0, bottom=0.0_5, right=1.0, top=0.9_5)
plt.show()
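
# Note: cv2.getAffineTransform takes exactly three source/destination point
# pairs and returns the 2x3 matrix that maps the source triangle onto the
# destination triangle; warpAffine then applies that matrix to every pixel.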
| 89
| 1
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(SCREAMING_SNAKE_CASE__ , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
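

# Illustrative sketch (mirrors the parametrized expectations above, not the
# library internals): _distribute_shards splits num_shards into at most
# max_num_jobs contiguous ranges, giving earlier jobs the larger slices, e.g.
# _distribute_shards(num_shards=10, max_num_jobs=3)
# -> [range(0, 4), range(4, 7), range(7, 10)]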
| 20
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
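
    # Worked example (illustrative values): with embedding_dimension=[3],
    # num_dynamic_real_features=0, num_time_features=2,
    # num_static_real_features=0 and input_size=1, _number_of_features
    # = 3 + 0 + 2 + 0 + 1 * 2 = 7, so the model's feature size becomes
    # input_size * len(lags_sequence) + 7.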
| 17
| 0
|
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding..")
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(params=optimizer_grouped_parameters, optim=optimizer_cls, **optimizer_kwargs)
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
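    # Illustrative: with --lr_scheduler cosine and --warmup_steps 500, the lookup above
    # resolves to get_cosine_schedule_with_warmup(self.optimizer, num_warmup_steps=500,
    # num_training_steps=num_training_steps).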
    def _get_train_sampler(self):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits
    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"], attention_mask=inputs["attention_mask"], **gen_kwargs
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
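    # Illustrative: with pad_token_id=0 and max_length=6, a (2, 4) tensor of generated
    # ids is returned as a (2, 6) tensor whose last two columns are filled with 0.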
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")
def compute_metrics(eval_pred):
    '''simple docstring'''
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
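# Illustrative: eval_pred is a (logits, labels) pair with logits of shape
# (num_examples, num_labels); argmax over axis 1 converts logits to class ids,
# e.g. [[0.1, 2.3], [1.5, 0.2]] -> [1, 0], before comparing against the references.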
class CustomCallback(TrainerCallback):
    """simple docstring"""

    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    '''simple docstring'''
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
    main()
"""simple docstring"""
from collections import deque
class Process:
    '''simple docstring'''

    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    '''simple docstring'''

    def __init__(self, number_of_queues: int, time_slices: list, queue: deque, current_time: int) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()
    def calculate_sequence_of_finish_queue(self) -> list:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list) -> list:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list) -> list:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list) -> list:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque) -> list:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque) -> deque:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue: deque, time_slice: int):
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
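    # Illustrative trace (hand-computed, for orientation): with bursts 53/17/68/24 and
    # time slices [17, 25], P2 finishes in the first round-robin pass (t=34), P4 in the
    # second (t=125), and P1 (t=136) then P3 (t=162) finish under FCFS, so the finish
    # sequence is P2, P4, P1, P3.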
if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes(P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:{mlfq.calculate_sequence_of_finish_queue()}")
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    '''simple docstring'''
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)
    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)
    return letter == LOOKUP_LETTERS[number % 23]
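# Worked example (illustrative): "12345678Z" is valid, since 12345678 % 23 == 14
# and LOOKUP_LETTERS[14] == "Z".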
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''

    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
def print_pascal_triangle(num_rows: int) -> None:
    """simple docstring"""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list:
    """simple docstring"""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list, current_row_idx: int) -> list:
    """simple docstring"""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(triangle: list, current_row: list, current_row_idx: int, current_col_idx: int) -> None:
    """simple docstring"""
    # each interior entry is the sum of the two entries above it
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list:
    """simple docstring"""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
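# Illustrative: for row_index = 4 the optimized path computes only the first
# sum(divmod(5, 2)) == 3 entries ([1, 4, 6]) and mirrors them to get [1, 4, 6, 4, 1].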
def benchmark() -> None:
    """simple docstring"""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
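# Illustrative: for out = [[0.1, 2.3], [1.5, 0.2]] and labels = [1, 1], argmax picks
# [1, 0], so accuracy(out, labels) returns 1 (one correct prediction).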
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading function also adds new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
    main()
"""simple docstring"""
import argparse
import os
import re
import packaging.version
lowercase__ = """examples/"""
lowercase__ = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
lowercase__ = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
lowercase__ = """README.md"""
def update_version_in_file(fname, version, pattern):
    """simple docstring"""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """simple docstring"""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """simple docstring"""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """simple docstring"""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """simple docstring"""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
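# Illustrative (the version string is hypothetical): if __init__.py contains
# __version__ = "4.28.0.dev0", get_version() returns a Version with
# is_devrelease == True and base_version == "4.28.0", which drives the branches below.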
def pre_release_work(patch=False):
    """simple docstring"""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """simple docstring"""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
"""simple docstring"""
def perfect(number: int) -> bool:
    """simple docstring"""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
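# Example: perfect(28) is True because the proper divisors 1, 2, 4, 7 and 14 sum to 28.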
if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
'''simple docstring'''
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
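# Hedged usage sketch (matrix values are illustrative): the factors satisfy
# table == lower @ upper, e.g.
#   table = np.array([[4.0, 3.0], [6.0, 3.0]])
#   lower, upper = lower_upper_decomposition(table)
#   # lower -> [[1.0, 0.0], [1.5, 1.0]], upper -> [[4.0, 3.0], [0.0, -1.5]]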
if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __magic_name__ ( _UpperCamelCase ):
@require_torch
def __lowercase ( self : Tuple ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Optional[int] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_a : List[str] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_a : Tuple = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_a : List[Any] = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_UpperCAmelCase )
BertModel.from_pretrained(_UpperCAmelCase )
BertTokenizer.from_pretrained(_UpperCAmelCase )
pipeline(task='fill-mask' ,model=_UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_a : Tuple = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : int = '1'
_a : List[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : Any ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Dict = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_a : Optional[int] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_a : Optional[Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_a : int = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_UpperCAmelCase )
BertModel.from_pretrained(_UpperCAmelCase )
BertTokenizer.from_pretrained(_UpperCAmelCase )
pipeline(task='fill-mask' ,model=_UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_a : str = self.get_env()
_a : Optional[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : List[str] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Union[str, Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
_a : Optional[Any] = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
_a : str = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
_a : Optional[Any] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_a : Dict = self.get_env()
_a : int = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# next emulate no network
_a : List[Any] = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : int = '1'
_a : Any = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : int ):
_a : Optional[Any] = '\nfrom transformers import pipeline\n '
_a : str = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
_a : List[str] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
_a : List[Any] = self.get_env()
_a : Dict = '1'
_a : Dict = [sys.executable, '-c', '\n'.join([load, mock, run] )]
_a : str = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,1 ,result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' ,result.stderr.decode().replace('\n' ,'' ) ,)
    @require_torch
    def test_offline_model_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
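    # A minimal, self-contained sketch of the pattern used above (stdlib only): run a
    # child Python process whose socket.socket is monkey-patched to raise, then assert
    # on its exit code. Any network-free payload succeeds; anything opening a socket fails.
    #
    #     import subprocess, sys
    #
    #     mock = (
    #         "import socket\n"
    #         "def offline_socket(*a, **kw): raise ValueError('Offline mode is enabled')\n"
    #         "socket.socket = offline_socket\n"
    #     )
    #     child = mock + "print('success')\n"
    #     result = subprocess.run([sys.executable, "-c", child], capture_output=True, check=False)
    #     assert result.returncode == 0 and b"success" in result.stdout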
| 89
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
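# Usage sketch for the classmethod above (hypothetical values; assumes the full
# `transformers` package, where this class is exported as EncoderDecoderConfig):
#
#     from transformers import BertConfig, EncoderDecoderConfig
#
#     enc = BertConfig(hidden_size=128, num_hidden_layers=2)
#     dec = BertConfig(hidden_size=128, num_hidden_layers=2)
#     cfg = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
#     assert cfg.decoder.is_decoder and cfg.decoder.add_cross_attention
#     assert cfg.to_dict()["model_type"] == "encoder-decoder"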
| 369
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
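# Shape sketch for the derived attributes above (illustrative, default values):
# embed_dim=96 and depths=[2, 2, 6, 2] give hidden_size = 96 * 2**3 = 768 and
# stage_names = ["stem", "stage1", "stage2", "stage3", "stage4"]; when out_features
# and out_indices are both None, the alignment helper falls back to the last stage.
#
#     cfg = MaskFormerSwinConfig()
#     assert cfg.hidden_size == 768
#     assert cfg.stage_names[-1] == "stage4"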
| 344
| 0
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
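# Usage sketch (hypothetical checkpoint; downloads weights on first call): a joint
# text+image call returns the tokenizer encoding with "pixel_values" merged in, as
# implemented in __call__ above.
#
#     from PIL import Image
#
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     image = Image.new("RGB", (224, 224))
#     inputs = processor(text=["a photo"], images=image, return_tensors="pt")
#     assert {"input_ids", "attention_mask", "pixel_values"} <= set(inputs.keys())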
| 52
|
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_spark():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
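# What map_nested does, independent of the Spark backend (illustrative): the function
# is applied to every leaf of arbitrarily nested lists/dicts, preserving structure.
#
#     >>> map_nested(add_one, {"a": [1, 2], "b": 3})
#     {'a': [2, 3], 'b': 4}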
| 326
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 364
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
'''GLPN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GLPNForDepthEstimation''',
'''GLPNLayer''',
'''GLPNModel''',
'''GLPNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
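# The structure above is the stock lazy-import pattern: _import_structure maps
# submodule names to exported symbols, and _LazyModule defers the actual imports
# until attribute access. A minimal stdlib sketch of the same idea (names are
# placeholders, not part of this module):
#
#     import importlib
#
#     class LazyAttr:
#         def __init__(self, module_name):
#             self._module_name = module_name
#
#         def __getattr__(self, name):
#             return getattr(importlib.import_module(self._module_name), name)
#
#     np = LazyAttr("numpy")  # numpy is only imported when an attribute is first touched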
| 52
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language of the desired output. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
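# Call-shape sketch (illustrative; instantiating the tool downloads the NLLB
# checkpoint, and the keyword names follow the `inputs` spec above):
#
#     tool = TranslationTool()
#     tool("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English")
#     # encode() maps the plain-English names through LANGUAGE_CODES
#     # ("French" -> "fra_Latn", "English" -> "eng_Latn") before generation.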
| 94
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
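# Usage sketch (hypothetical checkpoint; requires `transformers` plus audio data):
#
#     import numpy as np
#
#     processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#     audio = np.zeros(48_000, dtype=np.float32)  # one second of silence at 48 kHz
#     inputs = processor(text=["a dog barking"], audios=audio,
#                        sampling_rate=48_000, return_tensors="pt")
#     # joint calls merge "input_features" into the tokenizer encoding, per __call__ above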
| 94
| 1
|
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    # matrix holds the best known distance to each cell; predecessors the cell we came from
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
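# Example run on a small binary grid (1 = walkable, 0 = blocked); illustrative only.
if __name__ == "__main__":
    example_grid = np.array(
        [
            [1, 1, 1],
            [0, 1, 0],
            [0, 1, 1],
        ]
    )
    dist, path = dijkstra(example_grid, (0, 0), (2, 2), allow_diagonal=False)
    print(dist)  # 4.0 - each of the four moves costs 1
    print(path)  # [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]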
| 22
|
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 22
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residuals = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 25
|
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Build an affine transform from the two point triplets and apply it to the image."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 161
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()
if args.model_type == "roberta":
_a = RobertaForMaskedLM.from_pretrained(args.model_name)
_a = "roberta"
elif args.model_type == "gpt2":
_a = GPTaLMHeadModel.from_pretrained(args.model_name)
_a = "transformer"
_a = model.state_dict()
_a = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
_a = state_dict[f"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
_a = f"""{prefix}.embeddings.{w}.weight"""
_a = state_dict[param_name]
for w in ["weight", "bias"]:
_a = f"""{prefix}.embeddings.LayerNorm.{w}"""
_a = state_dict[param_name]
# Transformer Blocks #
_a = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
_a = state_dict[
f"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
_a = state_dict[f"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
_a = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
_a = state_dict[f"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_a = state_dict[f"""lm_head.dense.{w}"""]
_a = state_dict[f"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
_a = state_dict[f"""{prefix}.ln_f.{w}"""]
_a = state_dict["lm_head.weight"]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 350
|
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
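# Illustration of the scoring (guarded so the doctest run above is unaffected):
# ETAOIN's six most and six least common letters are compared against the two
# ends of the message's own frequency order.
if __name__ == "__main__":
    print(get_frequency_order("Hello World"))
    print(english_freq_match_score("Hello World"))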
| 23
| 0
|
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 195
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
    _import_structure["modeling_data2vec_text"] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
    _import_structure["modeling_data2vec_vision"] = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 344
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
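# Usage sketch (hypothetical checkpoint; downloads the tokenizer files on first use):
#
#     tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#     enc = tok("ELECTRA reuses BERT-style WordPiece vocabularies")
#     # build_inputs_with_special_tokens above yields the [CLS] ... [SEP] framing
#     assert enc.input_ids[0] == tok.cls_token_id and enc.input_ids[-1] == tok.sep_token_id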
| 353
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Any = {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json''',
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    """Configuration class for GPT-NeoX-Japanese models."""

    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
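

if __name__ == "__main__":
    # Illustrative usage sketch (added): the defaults above can be overridden
    # like any PretrainedConfig, e.g. to build a tiny model for testing.
    config = GPTNeoXJapaneseConfig(num_hidden_layers=2, hidden_size=128)
    print(config.hidden_size, config.rotary_emb_base)  # 128 10000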
| 73
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 34
|
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Top-down (memoized) Levenshtein edit distance between word1 and word2."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
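    # Illustrative check (added): "kitten" -> "sitting" needs 3 edits
    # (substitute k->s, substitute e->i, insert g).
    print(min_distance_up_bottom("kitten", "sitting"))  # expected: 3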
| 52
| 0
|
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the
    1000-digit number n (Project Euler problem 8)."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
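    # Illustrative cross-check (added): recompute the same maximum with
    # math.prod over every 13-digit window of N; it must agree with solution().
    from math import prod

    assert solution() == max(prod(int(d) for d in N[i : i + 13]) for i in range(len(N) - 12))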
| 366
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 2
| 0
|
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: return the maximum sum over all contiguous subarrays of arr."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"{max_subarray_sum(nums) = }")
| 22
|
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name

    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
| 22
| 1
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.26.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('''>=''', '''0.0.12''')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        images: np.ndarray
        nsfw_content_detected: List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 62
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_jukebox''': [
'''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''JukeboxConfig''',
'''JukeboxPriorConfig''',
'''JukeboxVQVAEConfig''',
],
'''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''JukeboxModel''',
'''JukeboxPreTrainedModel''',
'''JukeboxVQVAE''',
'''JukeboxPrior''',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 62
| 1
|
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None):
    """Return the Schur complement of the block matrix [[A, B], [B.T, C]],
    i.e. C - B.T @ A^{-1} @ B."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b
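

if __name__ == "__main__":
    # Illustrative usage sketch (added): for A = 2*I, B = [1, 0]^T, C = [3],
    # the Schur complement is C - B^T A^{-1} B = 3 - 0.5 = 2.5.
    demo_a = np.array([[2.0, 0.0], [0.0, 2.0]])
    demo_b = np.array([[1.0], [0.0]])
    demo_c = np.array([[3.0]])
    print(schur_complement(demo_a, demo_b, demo_c))  # expected: [[2.5]]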
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            # passing the blocks in the wrong order makes the shapes inconsistent
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 88
|
'''simple docstring'''
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Return the maximum element of nums[left..right] using divide and conquer."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
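    # Illustrative check (added): maximum over the full index range of a small list.
    print(find_max([3, -1, 4.5, 1, 5, 9, -2, 6], 0, 7))  # expected: 9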
| 23
| 0
|
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"""word_embeddings_layernorm.weight""",
"""word_embeddings_layernorm.bias""",
"""input_layernorm.weight""",
"""input_layernorm.bias""",
"""post_attention_layernorm.weight""",
"""post_attention_layernorm.bias""",
"""self_attention.dense.bias""",
"""mlp.dense_4h_to_h.bias""",
"""ln_f.weight""",
"""ln_f.bias""",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"""mlp.dense_4h_to_h.weight""",
"""self_attention.dense.weight""",
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weight names to the transformers naming."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    """Return the size in bytes of one element of the given torch dtype."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
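

# Illustrative values (added): get_dtype_size(torch.float16) -> 2,
# get_dtype_size(torch.float32) -> 4, and get_dtype_size(torch.bool) -> 1/8;
# these byte widths feed the total-size computation of the sharded index below.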
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None

        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bloom_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path to the Megatron-LM checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--bloom_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--shard_model""",
action="""store_true""",
help="""An optional setting to shard the output model \nThis enables sharding the converted checkpoint""",
)
parser.add_argument(
"""--pretraining_tp""",
default=4,
type=int,
help="""Pretraining TP rank that has been used when training the model in Megatron-LM \n""",
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 369
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""SCUT-DLVCLab/lilt-roberta-en-base""": (
"""https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
),
}
class LiltConfig(PretrainedConfig):
    """Configuration class for LiLT (Language-independent Layout Transformer) models."""

    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 161
| 0
|
'''simple docstring'''
def binary_count_setbits(a: int) -> int:
    """Count the set bits (1s) in the binary representation of a positive integer."""
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(a, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(a).count("1")
if __name__ == "__main__":
import doctest
doctest.testmod()
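    # Illustrative check (added): 25 is 0b11001, which contains three set bits.
    print(binary_count_setbits(25))  # expected: 3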
| 23
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
a ="""ETAOINSHRDLCUMWFGYPBVKJXQZ"""
a ="""ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1

    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    """Score 0-12: how closely the letter-frequency ordering of message matches
    typical English (ETAOIN... most common, ...VKJXQZ least common)."""
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
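    # Illustrative check (added): ordinary English prose should score near the
    # maximum of 12 on the frequency-match scale, while a string built from
    # rare letters should score much lower.
    print(english_freq_match_score("Sing a song of sixpence, a pocket full of rye"))
    print(english_freq_match_score("zxxz qqjj zzxx jqjq zxzx"))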
| 73
| 0
|
"""simple docstring"""
__all__ = [
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 358
|
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")
if __name__ == "__main__":
main()
| 181
| 0
|
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 34
|
'''simple docstring'''
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        """A cell can be visited if it is inside the grid, is land, and is unvisited."""
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        """Depth-first search over the 8 neighbours of cell (i, j)."""
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
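

if __name__ == "__main__":
    # Illustrative usage sketch (added): the classic 5x5 grid with five
    # 8-connected islands.
    grid = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    print(Matrix(5, 5, grid).count_islands())  # expected: 5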
| 2
| 0
|
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits in num! (Project Euler problem 20)."""
    return sum(map(int, str(factorial(num))))
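

# Illustrative check (added): 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.
assert solution(10) == 27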
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 367
|
from math import factorial
def combinations(n: int, k: int) -> int:
    """Return n choose k (the binomial coefficient)."""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
'''If a class of 40 students must be arranged into groups of''',
f'''4 for group projects, there are {combinations(40, 4)} ways''',
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
f'''are {combinations(10, 3)} ways that first, second and''',
'''third place can be awarded.''',
)
| 278
| 0
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
    def test_prepare_batch_integration(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
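        # The trailing 0 in expected_src_tokens is the <pad> id: the first sentence
        # is one byte shorter than the second, so padding=True right-pads it to the
        # shared length of 37.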
    def test_empty_target_text(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length(self):
        tokenizer = self.ta_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir)
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token", tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=False)]
                tokenizer = tokenizer_class.from_pretrained(tmp_dir, additional_special_tokens=new_added_tokens)

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                # a single id whose byte is not valid utf-8 on its own decodes to the empty string
                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained-model list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary files
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist, so there is nothing to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests use invalid tokens for ByT5, which can only accept
        # one-character strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
| 62
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
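# The split below is the standard lazy-import pattern: static type checkers see
# the real imports under TYPE_CHECKING, while at runtime the module is replaced
# by a _LazyModule that resolves names from _import_structure on first
# attribute access.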
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 62
| 1
|
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image=None,
        strength=0.8,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ):
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the DDIM paper and should be in [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
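# Minimal usage sketch (the checkpoint id below is illustrative -- any diffusers
# checkpoint providing an unconditional `unet` plus a DDIM-compatible scheduler
# at a matching resolution should work):
#
#   pipe = DDIMNoiseComparativeAnalysisPipeline.from_pretrained("google/ddpm-ema-church-256")
#   images, noising_timestep = pipe(image=init_image, strength=0.6, return_dict=False)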
| 350
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(self, vocab_size=50000, embedding_size=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1536, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, rotary_value=False, use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
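# Usage sketch:
#   config = RoFormerConfig()                # defaults shown above
#   onnx_config = RoFormerOnnxConfig(config)
#   list(onnx_config.inputs)                 # ['input_ids', 'attention_mask', 'token_type_ids']
# `rotary_value` controls whether the rotary position embedding is also applied
# to the value projections (queries and keys always receive it).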
| 200
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 84
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """Predict total users via ordinary least squares on rows [1, date, match]."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    # beta[2] is the *coefficient* of the match feature, not an additive term
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2])
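# Background for the closed form above: beta solves the OLS normal equations
#   beta = (X^T X)^{-1} X^T y
# with design-matrix rows [1, date_i, match_i], so the fitted model is
#   users ≈ beta[0] + beta[1] * date + beta[2] * match.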
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Seasonal ARIMA forecast with a weekly (period-7) seasonal component."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """RBF-kernel SVR trained on (date, match) features."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    """Return a lower limit derived from the interquartile range of the data."""
    train_user.sort()
    q25 = np.percentile(train_user, 25)
    q75 = np.percentile(train_user, 75)
    iqr = q75 - q25
    low_lim = q25 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Majority vote: a forecast is 'safe' if it stays within 0.1 of the actual result."""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
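# Worked example for the voting check: data_safety_checker([2, 3, 4], 5.0)
# returns False -- no vote exceeds the actual result, but every vote differs
# from it by more than 0.1, so not_safe == 3 and safe == 0.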
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
a__ : List[str] = [[1_8_2_3_1, 0.0, 1], [2_2_6_2_1, 1.0, 2], [1_5_6_7_5, 0.0, 3], [2_3_5_8_3, 1.0, 4]]
a__ : Optional[int] = pd.DataFrame(
data_input, columns=["total_user", "total_even", "days"]
)
a__ : List[Any] = Normalizer().fit_transform(data_input_df.values)
# split data
a__ : Dict = normalize_df[:, 2].tolist()
a__ : Optional[int] = normalize_df[:, 0].tolist()
a__ : str = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
a__ : Tuple = normalize_df[:, [1, 2]].tolist()
a__ : Dict = x[: len(x) - 1]
a__ : Any = x[len(x) - 1 :]
# for linear regression & sarimax
a__ : Tuple = total_date[: len(total_date) - 1]
a__ : List[Any] = total_user[: len(total_user) - 1]
a__ : List[Any] = total_match[: len(total_match) - 1]
a__ : List[str] = total_date[len(total_date) - 1 :]
a__ : List[str] = total_user[len(total_user) - 1 :]
a__ : Tuple = total_match[len(total_match) - 1 :]
# voting system with forecasting
a__ : Optional[Any] = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
a__ : List[str] = "" if data_safety_checker(res_vote, tst_user) else "not "
print("Today's data is {not_str}safe.")
| 161
| 0
|
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 364
|
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING

FlaxAutoModel = auto_class_update(FlaxAutoModel)

class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING

FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")

class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING

FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")

class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING

FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")

class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)

class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)

class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING

FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")

class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)

class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING

FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")

class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING

FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)

class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)

class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING

FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")

class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING

FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
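# Usage sketch: the auto classes dispatch on a checkpoint's config.model_type,
# e.g. FlaxAutoModelForSeq2SeqLM.from_pretrained("t5-base") resolves to
# FlaxT5ForConditionalGeneration through the name mappings above.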
| 250
| 0
|
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"""wmt19-ru-en""": {"""length_penalty""": 1.1},
"""wmt19-en-ru""": {"""length_penalty""": 1.15},
"""wmt19-en-de""": {"""length_penalty""": 1.0},
"""wmt19-de-en""": {"""length_penalty""": 1.1},
# allenai:
"""wmt16-en-de-dist-12-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-dist-6-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-12-1""": {"""length_penalty""": 0.8},
"""wmt19-de-en-6-6-base""": {"""length_penalty""": 0.6},
"""wmt19-de-en-6-6-big""": {"""length_penalty""": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
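# Example:
#   rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "er": 7})
#   -> {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le": 5, "er</w>": 7}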
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fsmt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCamelCase__ = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 92
|
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 181
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 368
|
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
)
parser.add_argument(
'--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
)
parser.add_argument(
'--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
)
parser.add_argument('--vocab_size', default=30_522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 235
| 0
|
def different_signs(num1: int, num2: int) -> bool:
    """Return True if num1 and num2 have opposite signs (their xor is negative)."""
    return (num1 ^ num2) < 0
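# Examples:
#   different_signs(1, -1)  -> True   (sign bits differ, so the xor is negative)
#   different_signs(1, 1)   -> False
#   different_signs(-2, -3) -> False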
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10
|
def solution(limit=1000000):
    """Return the starting number below `limit` with the longest Collatz chain
    (Project Euler problem 14), memoizing chain lengths along the way."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, limit):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
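# For the default limit of 1_000_000 the longest Collatz chain starts at 837799.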
if __name__ == "__main__":
print(solution(int(input().strip())))
| 278
| 0
|
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
snake_case : Union[str, Any] = logging.getLogger(__name__)
snake_case : Optional[int] = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )
@dataclasses.dataclass
class STDataArguments:
    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "The name of the task to train on."}
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )
@dataclasses.dataclass
class STTrainingArguments:
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."}
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."}
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to fine-tune on labeled data after pseudo training."}
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."}
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100, metadata={"help": "Maximum number of self-training iterations to run."}
    )
    seed: Optional[int] = dataclasses.field(
        default=None, metadata={"help": "Random seed for initialization."}
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
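# In short: model predictions on the unlabeled split are joined with their
# probabilities, optionally filtered (by a fixed confidence threshold and/or by
# keeping the top `eval_result` fraction), mapped from class ids back to label
# names, shuffled, and written out as the next iteration's pseudo-labeled
# training file.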
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Run the self-training loop: fine-tune, pseudo-label, and iterate."""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
__magic_name__ : Optional[Any] = data_dir_format(_snake_case )
assert os.path.exists(_snake_case )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
__magic_name__ : Union[str, Any] = os.path.join(_snake_case , "stage-1" )
__magic_name__ : Union[str, Any] = {
"accelerator": accelerator,
"model_name_or_path": args.model_name_or_path,
"cache_dir": args.cache_dir,
"do_train": True,
"train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
"do_eval": True if args.eval_file is not None else False,
"eval_file": data_files["eval"],
"do_predict": True,
"infer_file": data_files["infer"],
"task_name": args.task_name,
"label_list": args.label_list,
"output_dir": current_output_dir,
"eval_metric": args.eval_metric,
"evaluation_strategy": args.evaluation_strategy,
"early_stopping_patience": args.early_stopping_patience,
"early_stopping_threshold": args.early_stopping_threshold,
"seed": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(_snake_case , _snake_case ):
arguments_dict.update({key: value} )
__magic_name__ : Union[str, Any] = os.path.join(_snake_case , "best-checkpoint" , _snake_case )
if os.path.exists(_snake_case ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , _snake_case , _snake_case , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , _snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info("Self-training job completed: iteration: %d, stage: 1." , _snake_case )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
__magic_name__ : List[Any] = os.path.join(_snake_case , "best-checkpoint" )
__magic_name__ : List[Any] = os.path.join(_snake_case , "stage-2" )
# Update arguments_dict
__magic_name__ : List[str] = model_path
__magic_name__ : str = data_files["train"]
__magic_name__ : Any = current_output_dir
__magic_name__ : Any = os.path.join(_snake_case , "best-checkpoint" , _snake_case )
if os.path.exists(_snake_case ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , _snake_case , _snake_case , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , _snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info("Self-training job completed: iteration: %d, stage: 2." , _snake_case )
__magic_name__ : Tuple = iteration
__magic_name__ : Any = data_dir_format(iteration + 1 )
__magic_name__ : str = AutoConfig.from_pretrained(os.path.join(_snake_case , "best-checkpoint" ) )
__magic_name__ : Optional[int] = config.idalabel
__magic_name__ : List[Any] = os.path.join(_snake_case , "eval_results_best-checkpoint.json" )
__magic_name__ : List[Any] = os.path.join(_snake_case , "test_results_best-checkpoint.json" )
assert os.path.exists(_snake_case )
with open(_snake_case , "r" ) as f:
__magic_name__ : Dict = float(json.load(_snake_case )[args.eval_metric] )
__magic_name__ : Tuple = os.path.join(_snake_case , "infer_output_best-checkpoint.csv" )
assert os.path.exists(_snake_case )
# Loading the dataset from local csv or json files.
__magic_name__ : int = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]} )["data"]
__magic_name__ : Optional[Any] = load_dataset("csv" , data_files={"data": infer_output_file} )["data"]
if accelerator.is_main_process:
os.makedirs(_snake_case , exist_ok=_snake_case )
shutil.copy(_snake_case , os.path.join(_snake_case , F'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(_snake_case ):
shutil.copy(_snake_case , os.path.join(_snake_case , F'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
accelerator.wait_for_everyone()
__magic_name__ : Optional[Any] = os.path.join(_snake_case , F'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
__magic_name__ : Optional[int] = eval_result
if best_iteration is None:
__magic_name__ : Union[str, Any] = new_iteration
__magic_name__ : List[Any] = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
__magic_name__ : str = new_iteration
__magic_name__ : str = new_eval_result
__magic_name__ : Optional[Any] = 0
else:
if new_eval_result == best_eval_result:
__magic_name__ : str = new_iteration
__magic_name__ : Any = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
__magic_name__ : int = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("Best iteration: %d" , _snake_case )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , _snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case , F'''eval_results_iter-{iteration}.json''' ) , os.path.join(_snake_case , "eval_results_best-iteration.json" ) , )
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , _snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case , F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(_snake_case , "eval_results_best-iteration.json" ) , )
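# A minimal, self-contained sketch of the early-stopping bookkeeping used in
# the loop above, on a generic sequence of eval scores. The names `scores`,
# `patience` and `threshold` are illustrative, not part of the original file.
def pick_best_iteration(scores, patience=2, threshold=0.0):
    best_iteration, best_score, patience_counter = None, None, 0
    for iteration, score in enumerate(scores):
        if best_score is None or score - best_score > threshold:
            # A real improvement (beyond the threshold) resets the patience counter.
            best_iteration, best_score, patience_counter = iteration, score, 0
        else:
            if score == best_score:
                # A tie still moves the "best" pointer forward, as above.
                best_iteration, best_score = iteration, score
            patience_counter += 1
            if patience_counter >= patience:
                break
    return best_iteration, best_score


# pick_best_iteration([0.70, 0.74, 0.74, 0.73]) -> (2, 0.74)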
| 350
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
snake_case : Dict = datasets.utils.logging.get_logger(__name__)
@dataclass
class _snake_case ( datasets.BuilderConfig ):
UpperCamelCase__ = 1_0000
UpperCamelCase__ = None
UpperCamelCase__ = None
class _snake_case ( datasets.ArrowBasedBuilder ):
UpperCamelCase__ = ParquetConfig
def SCREAMING_SNAKE_CASE ( self ):
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE ( self , _a ):
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
__magic_name__ : Tuple = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_a , (str, list, tuple) ):
__magic_name__ : Dict = data_files
if isinstance(_a , _a ):
__magic_name__ : List[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__magic_name__ : Tuple = [dl_manager.iter_files(_a ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
__magic_name__ : List[str] = []
for split_name, files in data_files.items():
if isinstance(_a , _a ):
__magic_name__ : List[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__magic_name__ : Optional[int] = [dl_manager.iter_files(_a ) for file in files]
                # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(_a ):
with open(_a , "rb" ) as f:
__magic_name__ : str = datasets.Features.from_arrow_schema(pq.read_schema(_a ) )
break
splits.append(datasets.SplitGenerator(name=_a , gen_kwargs={"files": files} ) )
return splits
def SCREAMING_SNAKE_CASE ( self , _a ):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__magic_name__ : str = table_cast(_a , self.info.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : Tuple = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
for file_idx, file in enumerate(itertools.chain.from_iterable(_a ) ):
with open(_a , "rb" ) as f:
__magic_name__ : List[str] = pq.ParquetFile(_a )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__magic_name__ : Union[str, Any] = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f'''{file_idx}_{batch_idx}''', self._cast_table(_a )
except ValueError as e:
logger.error(f'''Failed to read file \'{file}\' with error {type(_a )}: {e}''' )
raise
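# A self-contained sketch of the batched-read pattern used in the generator
# above: `ParquetFile.iter_batches` streams RecordBatches of at most
# `batch_size` rows, so a large file never has to be loaded whole. The
# function name and its arguments are illustrative.
import pyarrow as pa
import pyarrow.parquet as pq


def stream_parquet_tables(path, batch_size=10_000, columns=None):
    parquet_file = pq.ParquetFile(path)
    for record_batch in parquet_file.iter_batches(batch_size=batch_size, columns=columns):
        # Each batch becomes a small, independent Arrow table.
        yield pa.Table.from_batches([record_batch])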
| 41
| 0
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 252
|
'''simple docstring'''
def nand_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    """simple docstring"""
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
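# NAND is functionally complete: NOT, AND and OR can all be built from it.
# The derived gates below are an illustrative addition, not part of the
# original module.
def not_gate(input_1: int) -> int:
    return nand_gate(input_1, input_1)


def and_gate(input_1: int, input_2: int) -> int:
    return not_gate(nand_gate(input_1, input_2))


def or_gate(input_1: int, input_2: int) -> int:
    return nand_gate(not_gate(input_1), not_gate(input_2))


def test_derived_gates() -> None:
    assert [and_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 0, 0, 1]
    assert [or_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 1, 1, 1]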
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 200
| 0
|
"""simple docstring"""
import pprint
import requests
_lowerCAmelCase : List[str] = """https://zenquotes.io/api"""
def SCREAMING_SNAKE_CASE__ ( )-> list:
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + "/today" ).json()
def SCREAMING_SNAKE_CASE__ ( )-> list:
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + "/random" ).json()
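# A slightly more defensive variant (an illustrative sketch, not part of the
# original module): a timeout keeps the call from hanging on a slow endpoint,
# and `raise_for_status` surfaces HTTP errors instead of decoding a bad body.
def random_quotes_checked(timeout: float = 10.0) -> list:
    response = requests.get(API_ENDPOINT_URL + "/random", timeout=timeout)
    response.raise_for_status()
    return response.json()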
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 298
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowerCAmelCase__ :
SCREAMING_SNAKE_CASE_ =42
# setable values
SCREAMING_SNAKE_CASE_ =42
SCREAMING_SNAKE_CASE_ =42
SCREAMING_SNAKE_CASE_ =None
@classmethod
def __a ( cls : Optional[int] , snake_case__ : CommonSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray ):
'''simple docstring'''
return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ )
@dataclass
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ =42
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ =[e.name for e in FlaxKarrasDiffusionSchedulers]
SCREAMING_SNAKE_CASE_ =42
@property
def __a ( self : Union[str, Any] ):
'''simple docstring'''
return True
@register_to_config
def __init__( self : Tuple , snake_case__ : int = 1_0_0_0 , snake_case__ : float = 0.0001 , snake_case__ : float = 0.02 , snake_case__ : str = "linear" , snake_case__ : Optional[jnp.ndarray] = None , snake_case__ : str = "fixed_small" , snake_case__ : bool = True , snake_case__ : str = "epsilon" , snake_case__ : jnp.dtype = jnp.floataa , ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = dtype
def __a ( self : Any , snake_case__ : Optional[CommonSchedulerState] = None ):
'''simple docstring'''
if common is None:
UpperCAmelCase__ : Any = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype )
UpperCAmelCase__ : Optional[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , )
def __a ( self : int , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : Optional[int] = None ):
'''simple docstring'''
return sample
def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Tuple = () ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
UpperCAmelCase__ : Tuple = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=snake_case__ , timesteps=snake_case__ , )
def __a ( self : List[str] , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Any=None , snake_case__ : Union[str, Any]=None ):
'''simple docstring'''
UpperCAmelCase__ : int = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase__ : int = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCAmelCase__ : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCAmelCase__ : int = jnp.clip(snake_case__ , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCAmelCase__ : Union[str, Any] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) )
elif variance_type == "fixed_large":
UpperCAmelCase__ : List[Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCAmelCase__ : List[str] = variance
UpperCAmelCase__ : Optional[Any] = state.common.betas[t]
UpperCAmelCase__ : Any = (predicted_variance + 1) / 2
UpperCAmelCase__ : Dict = frac * max_log + (1 - frac) * min_log
return variance
def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : int , snake_case__ : jnp.ndarray , snake_case__ : Optional[jax.random.KeyArray] = None , snake_case__ : bool = True , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = timestep
if key is None:
UpperCAmelCase__ : Optional[int] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 )
else:
UpperCAmelCase__ : int = None
# 1. compute alphas, betas
UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCAmelCase__ : List[str] = 1 - alpha_prod_t
UpperCAmelCase__ : List[str] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : List[Any] = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase__ : int = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or `v_prediction`'
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : Optional[Any] = jnp.clip(snake_case__ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase__ : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
UpperCAmelCase__ : List[str] = jax.random.split(snake_case__ , num=1 )
UpperCAmelCase__ : List[str] = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCAmelCase__ : Optional[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ )
def __a ( self : List[Any] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ):
'''simple docstring'''
return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def __a ( self : Optional[int] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ):
'''simple docstring'''
return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
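# A compact, framework-agnostic sketch of the three `prediction_type`
# branches implemented above (it works on plain floats as well as arrays;
# the function name is illustrative). For "epsilon" it inverts
# x_t = sqrt(a_t) * x_0 + sqrt(1 - a_t) * eps, i.e. formula (15) of the
# DDPM paper cited in the comments.
def predict_original_sample(sample, model_output, alpha_prod_t, prediction_type="epsilon"):
    beta_prod_t = 1 - alpha_prod_t
    if prediction_type == "epsilon":
        return (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    if prediction_type == "sample":
        return model_output
    if prediction_type == "v_prediction":
        return alpha_prod_t**0.5 * sample - beta_prod_t**0.5 * model_output
    raise ValueError(f"unknown prediction_type: {prediction_type}")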
| 298
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ = logging.get_logger(__name__)
def _snake_case ( lowercase__ ):
_lowerCamelCase : int = 'huggingface/label-files'
_lowerCamelCase : Optional[Any] = 'imagenet-1k-id2label.json'
_lowerCamelCase : Union[str, Any] = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='dataset' ) , 'r' ) )
_lowerCamelCase : Union[str, Any] = {int(lowercase__ ): v for k, v in idalabel.items()}
_lowerCamelCase : List[Any] = {v: k for k, v in idalabel.items()}
_lowerCamelCase : Optional[Any] = 'std_conv' if 'bit' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
_lowerCamelCase : List[Any] = BitConfig(
conv_layer=lowercase__ , num_labels=1000 , idalabel=lowercase__ , labelaid=lowercase__ , )
return config
def _snake_case ( lowercase__ ):
if "stem.conv" in name:
_lowerCamelCase : int = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
_lowerCamelCase : int = name.replace('blocks' , 'layers' )
if "head.fc" in name:
_lowerCamelCase : Union[str, Any] = name.replace('head.fc' , 'classifier.1' )
if name.startswith('norm' ):
_lowerCamelCase : List[str] = 'bit.' + name
if "bit" not in name and "classifier" not in name:
_lowerCamelCase : int = 'bit.encoder.' + name
return name
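# The conversion above is mostly string surgery on state-dict keys. A toy,
# self-contained version of the same pattern (only the three rules are taken
# from the function above; the sample key is made up for illustration):
def rename_toy_key(name: str) -> str:
    for old, new in (
        ("stem.conv", "bit.embedder.convolution"),
        ("blocks", "layers"),
        ("head.fc", "classifier.1"),
    ):
        name = name.replace(old, new)
    return name


# rename_toy_key("stem.conv.weight") -> "bit.embedder.convolution.weight"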
def _snake_case ( ):
_lowerCamelCase : int = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowerCamelCase : int = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
@torch.no_grad()
def _snake_case ( lowercase__ , lowercase__ , lowercase__=False ):
_lowerCamelCase : Tuple = get_config(lowercase__ )
# load original model from timm
_lowerCamelCase : int = create_model(lowercase__ , pretrained=lowercase__ )
timm_model.eval()
# load state_dict of original model
_lowerCamelCase : Dict = timm_model.state_dict()
for key in state_dict.copy().keys():
_lowerCamelCase : Dict = state_dict.pop(lowercase__ )
_lowerCamelCase : str = val.squeeze() if 'head' in key else val
# load HuggingFace model
_lowerCamelCase : List[Any] = BitForImageClassification(lowercase__ )
model.eval()
model.load_state_dict(lowercase__ )
# create image processor
_lowerCamelCase : Dict = create_transform(**resolve_data_config({} , model=lowercase__ ) )
_lowerCamelCase : str = transform.transforms
_lowerCamelCase : int = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
_lowerCamelCase : int = BitImageProcessor(
do_resize=lowercase__ , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowercase__ , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=lowercase__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
_lowerCamelCase : Optional[Any] = prepare_img()
_lowerCamelCase : Optional[Any] = transform(lowercase__ ).unsqueeze(0 )
_lowerCamelCase : Optional[Any] = processor(lowercase__ , return_tensors='pt' ).pixel_values
# verify pixel values
assert torch.allclose(lowercase__ , lowercase__ )
# verify logits
with torch.no_grad():
_lowerCamelCase : List[str] = model(lowercase__ )
_lowerCamelCase : Dict = outputs.logits
print('Logits:' , logits[0, :3] )
print('Predicted class:' , model.config.idalabel[logits.argmax(-1 ).item()] )
_lowerCamelCase : List[Any] = timm_model(lowercase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase__ , outputs.logits , atol=1E-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase__ )
processor.save_pretrained(lowercase__ )
if push_to_hub:
print(f'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(f'''ybelkada/{model_name}''' )
processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
lowercase__ = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 96
|
'''simple docstring'''
import argparse
import os
import re
_snake_case = 'src/transformers'
# Pattern that looks at the indentation in a line.
_snake_case = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
_snake_case = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_snake_case = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_snake_case = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_snake_case = re.compile(r'\[([^\]]+)\]')
def _A ( snake_case ) -> str:
_lowercase : Union[str, Any] = _re_indent.search(snake_case )
return "" if search is None else search.groups()[0]
def _A ( snake_case , snake_case="" , snake_case=None , snake_case=None ) -> Optional[int]:
_lowercase : List[str] = 0
_lowercase : str = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(snake_case ):
index += 1
_lowercase : Optional[int] = ["\n".join(lines[:index] )]
else:
_lowercase : Dict = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowercase : Any = [lines[index]]
index += 1
while index < len(snake_case ) and (end_prompt is None or not lines[index].startswith(snake_case )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(snake_case ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(snake_case ) )
if index < len(snake_case ) - 1:
_lowercase : int = [lines[index + 1]]
index += 1
else:
_lowercase : Optional[int] = []
else:
blocks.append("\n".join(snake_case ) )
_lowercase : Optional[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(snake_case ) > 0:
blocks.append("\n".join(snake_case ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(snake_case ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def _A ( snake_case ) -> Optional[int]:
def _inner(snake_case ):
return key(snake_case ).lower().replace("_" , "" )
return _inner
def _A ( snake_case , snake_case=None ) -> List[str]:
# If no key is provided, we use a noop.
def noop(snake_case ):
return x
if key is None:
_lowercase : Optional[int] = noop
# Constants are all uppercase, they go first.
_lowercase : Dict = [obj for obj in objects if key(snake_case ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_lowercase : int = [obj for obj in objects if key(snake_case )[0].isupper() and not key(snake_case ).isupper()]
# Functions begin with a lowercase, they go last.
_lowercase : Dict = [obj for obj in objects if not key(snake_case )[0].isupper()]
_lowercase : Union[str, Any] = ignore_underscore(snake_case )
return sorted(snake_case , key=snake_case ) + sorted(snake_case , key=snake_case ) + sorted(snake_case , key=snake_case )
def _A ( snake_case ) -> List[Any]:
# This inner function sort imports between [ ].
def _replace(snake_case ):
_lowercase : Optional[Any] = match.groups()[0]
if "," not in imports:
return F'''[{imports}]'''
_lowercase : str = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowercase : Optional[int] = keys[:-1]
return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(snake_case )] ) + "]"
_lowercase : Tuple = import_statement.split("\n" )
if len(snake_case ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowercase : Union[str, Any] = 2 if lines[1].strip() == "[" else 1
_lowercase : Optional[int] = [(i, _re_strip_line.search(snake_case ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
_lowercase : Any = sort_objects(snake_case , key=lambda snake_case : x[1] )
_lowercase : Tuple = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(snake_case ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowercase : Dict = _re_bracket_content.sub(_replace , lines[1] )
else:
_lowercase : Optional[Any] = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowercase : Optional[int] = keys[:-1]
_lowercase : Optional[Any] = get_indent(lines[1] ) + ", ".join([F'''"{k}"''' for k in sort_objects(snake_case )] )
return "\n".join(snake_case )
else:
# Finally we have to deal with imports fitting on one line
_lowercase : Optional[Any] = _re_bracket_content.sub(_replace , snake_case )
return import_statement
def _A ( snake_case , snake_case=True ) -> Dict:
with open(snake_case , encoding="utf-8" ) as f:
_lowercase : Dict = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_lowercase : Optional[Any] = split_code_in_indented_blocks(
snake_case , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(snake_case ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_lowercase : Dict = main_blocks[block_idx]
_lowercase : Union[str, Any] = block.split("\n" )
# Get to the start of the imports.
_lowercase : int = 0
while line_idx < len(snake_case ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_lowercase : Optional[Any] = len(snake_case )
else:
line_idx += 1
if line_idx >= len(snake_case ):
continue
# Ignore beginning and last line: they don't contain anything.
_lowercase : Any = "\n".join(block_lines[line_idx:-1] )
_lowercase : int = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
_lowercase : Optional[int] = split_code_in_indented_blocks(snake_case , indent_level=snake_case )
# We have two categories of import key: list or _import_structure[key].append/extend
_lowercase : Union[str, Any] = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_lowercase : str = [(pattern.search(snake_case ).groups()[0] if pattern.search(snake_case ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_lowercase : List[str] = [(i, key) for i, key in enumerate(snake_case ) if key is not None]
_lowercase : Tuple = [x[0] for x in sorted(snake_case , key=lambda snake_case : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_lowercase : Any = 0
_lowercase : str = []
for i in range(len(snake_case ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
_lowercase : Tuple = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(snake_case )
count += 1
# And we put our main block back together with its first and last line.
_lowercase : int = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(snake_case ):
if check_only:
return True
else:
print(F'''Overwriting {file}.''' )
with open(snake_case , "w" , encoding="utf-8" ) as f:
f.write("\n".join(snake_case ) )
def _A ( snake_case=True ) -> str:
_lowercase : List[Any] = []
for root, _, files in os.walk(snake_case ):
if "__init__.py" in files:
_lowercase : Tuple = sort_imports(os.path.join(snake_case , "__init__.py" ) , check_only=snake_case )
if result:
_lowercase : Any = [os.path.join(snake_case , "__init__.py" )]
if len(snake_case ) > 0:
raise ValueError(F'''Would overwrite {len(snake_case )} files, run `make style`.''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
_snake_case = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
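# The three-bucket ordering enforced by the sorter above, in miniature:
# constants (ALL_CAPS) first, then classes (Capitalized), then functions,
# each bucket sorted case-insensitively with underscores ignored. The
# function below is an illustrative sketch, not part of the original script.
def bucket_sort_names(names):
    def key(name):
        return name.lower().replace("_", "")

    constants = sorted((n for n in names if n.isupper()), key=key)
    classes = sorted((n for n in names if n[0].isupper() and not n.isupper()), key=key)
    functions = sorted((n for n in names if not n[0].isupper()), key=key)
    return constants + classes + functions


# bucket_sort_names(["load_model", "MODEL_TYPES", "AutoModel", "_helper"])
# -> ["MODEL_TYPES", "AutoModel", "_helper", "load_model"]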
| 250
| 0
|
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
__magic_name__ = logging.getLogger()
__magic_name__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase ( A__ ):
'''simple docstring'''
def snake_case_ ( self , _snake_case ) -> Optional[Any]:
"""simple docstring"""
os.makedirs(_snake_case , exist_ok=_snake_case )
UpperCAmelCase = {'''source''': '''What is love ?''', '''target''': '''life'''}
UpperCAmelCase = {'''train''': 12, '''val''': 2, '''test''': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
UpperCAmelCase = '''\n'''.join([contents[field]] * n_lines[split] )
with open(os.path.join(_snake_case , f"""{split}.{field}""" ) , '''w''' ) as f:
f.write(_snake_case )
def snake_case_ ( self , _snake_case , _snake_case = "pytorch" ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = os.path.join(_snake_case , '''output''' )
UpperCAmelCase = os.path.join(_snake_case , '''data''' )
self._create_dummy_data(data_dir=_snake_case )
UpperCAmelCase = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('''--fp16''' )
else:
testargs.append('''--gpus=0''' )
testargs.append('''--distributed_backend=ddp_cpu''' )
testargs.append('''--num_processes=2''' )
UpperCAmelCase = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(_snake_case , env=self.get_env() )
UpperCAmelCase = os.path.join(_snake_case , '''metrics.json''' )
with open(_snake_case ) as f:
UpperCAmelCase = json.load(_snake_case )
return result
@require_torch_gpu
def snake_case_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_multi_gpu
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_gpu
@require_ray
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self._run_finetune(gpus=1 , distributed_retriever='''ray''' )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_multi_gpu
@require_ray
def snake_case_ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self._run_finetune(gpus=1 , distributed_retriever='''ray''' )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
| 152
|
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
__magic_name__ = {
"vocab_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
},
"emoji_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
},
}
__magic_name__ = {
"abeja/gpt-neox-japanese-2.7b": 2048,
}
def _lowerCAmelCase ( A__: List[Any] , A__: int ):
'''simple docstring'''
with open(A__ , '''r''' , encoding='''utf-8''' ) as f:
UpperCAmelCase = json.loads(f.read() )
UpperCAmelCase = collections.OrderedDict()
UpperCAmelCase = collections.OrderedDict()
UpperCAmelCase = collections.OrderedDict()
with open(A__ , '''r''' , encoding='''utf-8''' ) as f:
UpperCAmelCase = f.readlines()
UpperCAmelCase = [[t.rstrip('''\n''' )] if (t == ''',''' or ''',''' not in t) else t.rstrip('''\n''' ).split(''',''' ) for t in token]
for idx, b in enumerate(A__ ):
UpperCAmelCase = b
UpperCAmelCase = idx
for wd in b:
UpperCAmelCase = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class lowercase ( A__ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask"""]
def __init__( self , _snake_case , _snake_case , _snake_case="<|endoftext|>" , _snake_case="<|endoftext|>" , _snake_case="<|startoftext|>" , _snake_case="<|endoftext|>" , _snake_case=False , **_snake_case , ) -> Tuple:
"""simple docstring"""
super().__init__(
unk_token=_snake_case , pad_token=_snake_case , bos_token=_snake_case , eos_token=_snake_case , do_clean_text=_snake_case , **_snake_case , )
if not os.path.isfile(_snake_case ):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                ''' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
if not os.path.isfile(_snake_case ):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                ''' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
UpperCAmelCase = do_clean_text
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = load_vocab_and_emoji(_snake_case , _snake_case )
UpperCAmelCase = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def snake_case_ ( self ) -> Any:
"""simple docstring"""
        # self.vocab includes variants that handle character fluctuation unique to Japanese, so it is larger than self.raw_vocab
return len(self.raw_vocab )
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder )
def snake_case_ ( self , _snake_case ) -> List[Any]:
"""simple docstring"""
return self.subword_tokenizer.tokenize(_snake_case , clean=self.do_clean_text )
def snake_case_ ( self , _snake_case ) -> Dict:
"""simple docstring"""
return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) )
def snake_case_ ( self , _snake_case ) -> Optional[int]:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(_snake_case )
def snake_case_ ( self , _snake_case ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = ''''''.join(_snake_case ).strip()
return out_string
def snake_case_ ( self , _snake_case ) -> List[int]:
"""simple docstring"""
UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_snake_case , add_special_tokens=_snake_case ) + [self.eos_token_id] )
if len(_snake_case ) > self.model_max_length:
UpperCAmelCase = input_ids[-self.model_max_length :]
return input_ids
def snake_case_ ( self , _snake_case , _snake_case = None ) -> Tuple[str]:
"""simple docstring"""
UpperCAmelCase = 0
if os.path.isdir(_snake_case ):
UpperCAmelCase = os.path.join(
_snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase = os.path.join(
_snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''emoji_file'''] )
else:
UpperCAmelCase = (
(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''vocab_file''']
)
UpperCAmelCase = (
(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''emoji_file''']
)
with open(_snake_case , '''w''' , encoding='''utf-8''' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
UpperCAmelCase = token_index
writer.write(''','''.join(_snake_case ) + '''\n''' )
index += 1
with open(_snake_case , '''w''' , encoding='''utf-8''' ) as writer:
json.dump(self.emoji , _snake_case )
return vocab_file, emoji_file
class lowercase ( A__ ):
'''simple docstring'''
def __init__( self , _snake_case , _snake_case , _snake_case ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = vocab # same as swe
UpperCAmelCase = ids_to_tokens # same as bpe
UpperCAmelCase = emoji
UpperCAmelCase = np.max([len(_snake_case ) for w in self.vocab.keys()] )
UpperCAmelCase = re.compile(R'''(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)''' )
UpperCAmelCase = re.compile(R'''[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*''' )
UpperCAmelCase = re.compile(R'''[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}''' )
UpperCAmelCase = re.compile(
R'''([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
UpperCAmelCase = re.compile(
R'''(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
UpperCAmelCase = re.compile(
R'''((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*''' )
UpperCAmelCase = '''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'''
UpperCAmelCase = '''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'''
UpperCAmelCase = str.maketrans({k: '''<BLOCK>''' for k in keisen + blocks} )
def __len__( self ) -> Dict:
"""simple docstring"""
return len(self.ids_to_tokens )
def snake_case_ ( self , _snake_case ) -> str:
"""simple docstring"""
UpperCAmelCase = self.content_repattera.sub('''<URL>''' , _snake_case )
UpperCAmelCase = self.content_repattera.sub('''<EMAIL>''' , _snake_case )
UpperCAmelCase = self.content_repattera.sub('''<TEL>''' , _snake_case )
UpperCAmelCase = self.content_repattera.sub('''<DATE>''' , _snake_case )
UpperCAmelCase = self.content_repattera.sub('''<DATE>''' , _snake_case )
UpperCAmelCase = self.content_repattera.sub('''<PRICE>''' , _snake_case )
UpperCAmelCase = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
UpperCAmelCase = content.replace('''<BLOCK><BLOCK>''' , '''<BLOCK>''' )
return content
def snake_case_ ( self , _snake_case , _snake_case=False ) -> str:
"""simple docstring"""
UpperCAmelCase = text.replace(''' ''' , '''<SP>''' )
UpperCAmelCase = text.replace(''' ''' , '''<SP>''' )
UpperCAmelCase = text.replace('''\r\n''' , '''<BR>''' )
UpperCAmelCase = text.replace('''\n''' , '''<BR>''' )
UpperCAmelCase = text.replace('''\r''' , '''<BR>''' )
UpperCAmelCase = text.replace('''\t''' , '''<TAB>''' )
UpperCAmelCase = text.replace('''—''' , '''ー''' )
UpperCAmelCase = text.replace('''−''' , '''ー''' )
for k, v in self.emoji["emoji"].items():
if k in text:
UpperCAmelCase = text.replace(_snake_case , _snake_case )
if clean:
UpperCAmelCase = self.clean_text(_snake_case )
def check_simbol(_snake_case ):
UpperCAmelCase = x.encode()
if len(_snake_case ) == 1 and len(_snake_case ) == 2:
UpperCAmelCase = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0XC2A1 and c <= 0XC2BF)
or (c >= 0XC780 and c <= 0XC783)
or (c >= 0XCAB9 and c <= 0XCBBF)
or (c >= 0XCC80 and c <= 0XCDA2)
):
return True
return False
def checkuae(_snake_case ):
UpperCAmelCase = x.encode()
if len(_snake_case ) == 1 and len(_snake_case ) == 3:
UpperCAmelCase = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0XE28080 and c <= 0XE2B07F:
return True
return False
UpperCAmelCase = 0
UpperCAmelCase = []
while pos < len(_snake_case ):
UpperCAmelCase = min(len(_snake_case ) , pos + self.maxlen + 1 ) if text[pos] == '''<''' else pos + 3
UpperCAmelCase = [] # (token_id, token, pos)
for e in range(_snake_case , _snake_case , -1 ):
UpperCAmelCase = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(_snake_case ) > 2:
UpperCAmelCase = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(_snake_case ) > 0:
# the smallest token_id is adopted
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = sorted(_snake_case , key=lambda _snake_case : x[0] )[0]
result.append(_snake_case )
UpperCAmelCase = e
else:
UpperCAmelCase = pos + 1
UpperCAmelCase = text[pos:end]
if check_simbol(_snake_case ):
result.append('''<KIGOU>''' )
elif checkuae(_snake_case ):
result.append('''<U2000U2BFF>''' )
else:
for i in wd.encode('''utf-8''' ):
result.append('''<|byte%d|>''' % i )
UpperCAmelCase = end
return result
def snake_case_ ( self , _snake_case , _snake_case="\n" ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(_snake_case ) > 0:
words.append(bytearray(_snake_case ).decode('''utf-8''' , errors='''replace''' ) )
UpperCAmelCase = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['''emoji_inv'''][word] )
elif word == "<SP>":
words.append(''' ''' )
elif word == "<BR>":
words.append(_snake_case )
elif word == "<TAB>":
words.append('''\t''' )
elif word == "<BLOCK>":
words.append('''▀''' )
elif word == "<KIGOU>":
words.append('''ǀ''' )
elif word == "<U2000U2BFF>":
words.append('''‖''' )
else:
words.append(_snake_case )
if len(_snake_case ) > 0:
words.append(bytearray(_snake_case ).decode('''utf-8''' , errors='''replace''' ) )
UpperCAmelCase = ''''''.join(_snake_case )
return text
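# The `<|byte%d|>` fallback used above encodes any out-of-vocabulary
# character as its raw UTF-8 bytes, which makes encoding lossless. A
# self-contained round-trip sketch of that idea (function names are
# illustrative):
def to_byte_tokens(text: str) -> list:
    return ["<|byte%d|>" % b for b in text.encode("utf-8")]


def from_byte_tokens(tokens: list) -> str:
    # Strip the "<|byte" prefix and "|>" suffix, then decode the raw bytes.
    return bytearray(int(t[6:-2]) for t in tokens).decode("utf-8", errors="replace")


# from_byte_tokens(to_byte_tokens("寿司")) == "寿司"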
| 152
| 1
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( __a ):
__a : int = ["""image_processor""", """tokenizer"""]
__a : Union[str, Any] = """ChineseCLIPImageProcessor"""
__a : List[Any] = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : Dict , lowercase : Union[str, Any]=None , lowercase : Dict=None , **lowercase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowercase , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowercase , lowercase )
UpperCAmelCase = self.image_processor
def __call__( self : Tuple , lowercase : Optional[Any]=None , lowercase : Union[str, Any]=None , lowercase : int=None , **lowercase : Dict ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
UpperCAmelCase = self.tokenizer(lowercase , return_tensors=lowercase , **lowercase )
if images is not None:
UpperCAmelCase = self.image_processor(lowercase , return_tensors=lowercase , **lowercase )
if text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase ) , tensor_type=lowercase )
def A ( self : int , *lowercase : Tuple , **lowercase : List[str] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def A ( self : Optional[Any] , *lowercase : int , **lowercase : Optional[int] ):
'''simple docstring'''
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A ( self : List[Any] ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase , )
return self.image_processor_class
| 34
|
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : int = GPTaTokenizer
UpperCAmelCase__ : str = GPTaTokenizerFast
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : List[Any] = {"add_prefix_space": True}
UpperCAmelCase__ : int = False
def __lowercase ( self ) -> int:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_a : str = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
_a : List[Any] = dict(zip(_a , range(len(_a ) ) ) )
_a : Tuple = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_a : List[str] = {'''unk_token''': '''<unk>'''}
_a : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_a ) )
def __lowercase ( self , **_a ) -> List[str]:
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , **_a ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , _a ) -> Optional[Any]:
_a : Tuple = '''lower newer'''
_a : Tuple = '''lower newer'''
return input_text, output_text
def __lowercase ( self ) -> Any:
_a : Any = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_a : int = '''lower newer'''
_a : int = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
_a : List[str] = tokenizer.tokenize(_a , add_prefix_space=_a )
self.assertListEqual(_a , _a )
_a : Optional[int] = tokens + [tokenizer.unk_token]
_a : List[Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
def __lowercase ( self ) -> Optional[int]:
if not self.test_rust_tokenizer:
return
_a : int = self.get_tokenizer()
_a : Tuple = self.get_rust_tokenizer(add_prefix_space=_a )
_a : Tuple = '''lower newer'''
# Testing tokenization
_a : List[str] = tokenizer.tokenize(_a , add_prefix_space=_a )
_a : Optional[Any] = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
# Testing conversion to ids without special tokens
_a : List[Any] = tokenizer.encode(_a , add_special_tokens=_a , add_prefix_space=_a )
_a : List[Any] = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
# Testing conversion to ids with special tokens
_a : List[str] = self.get_rust_tokenizer(add_prefix_space=_a )
_a : List[str] = tokenizer.encode(_a , add_prefix_space=_a )
_a : Tuple = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
# Testing the unknown token
_a : Optional[Any] = tokens + [rust_tokenizer.unk_token]
_a : str = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_a ) , _a )
def __lowercase ( self , *_a , **_a ) -> int:
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __lowercase ( self , _a=1_5 ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_a : Optional[int] = self.rust_tokenizer_class.from_pretrained(_a , **_a )
# Simple input
_a : List[str] = '''This is a simple input'''
_a : Optional[Any] = ['''This is a simple input 1''', '''This is a simple input 2''']
_a : Tuple = ('''This is a simple input''', '''This is a pair''')
_a : Optional[Any] = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding='''max_length''' )
# Simple input
self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding='''max_length''' )
# Simple input
self.assertRaises(
_a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding='''max_length''' , )
# Pair input
self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding='''max_length''' )
# Pair input
self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding='''max_length''' )
# Pair input
self.assertRaises(
_a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding='''max_length''' , )
def __lowercase ( self ) -> List[Any]:
    tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

    # Simple input
    s = "This is a simple input"
    s2 = ["This is a simple input looooooooong", "This is a simple input"]
    p = ("This is a simple input", "This is a pair")
    p2 = [
        ("This is a simple input loooooong", "This is a simple input"),
        ("This is a simple pair loooooong", "This is a simple pair"),
    ]

    pad_token_id = tokenizer.pad_token_id

    out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
    out_s2 = tokenizer(s2, padding=True, truncation=True, return_tensors="np")
    out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
    out_p2 = tokenizer(p2, padding=True, truncation=True, return_tensors="np")

    # s
    # test single string max_length padding
    self.assertEqual(out_s["input_ids"].shape[-1], 30)
    self.assertTrue(pad_token_id in out_s["input_ids"])
    self.assertTrue(0 in out_s["attention_mask"])

    # s2
    # test automatic padding
    self.assertEqual(out_s2["input_ids"].shape[-1], 33)
    # long slice doesn't have padding
    self.assertFalse(pad_token_id in out_s2["input_ids"][0])
    self.assertFalse(0 in out_s2["attention_mask"][0])
    # short slice does have padding
    self.assertTrue(pad_token_id in out_s2["input_ids"][1])
    self.assertTrue(0 in out_s2["attention_mask"][1])

    # p
    # test single pair max_length padding
    self.assertEqual(out_p["input_ids"].shape[-1], 60)
    self.assertTrue(pad_token_id in out_p["input_ids"])
    self.assertTrue(0 in out_p["attention_mask"])

    # p2
    # test automatic padding pair
    self.assertEqual(out_p2["input_ids"].shape[-1], 52)
    # long slice pair doesn't have padding
    self.assertFalse(pad_token_id in out_p2["input_ids"][0])
    self.assertFalse(0 in out_p2["attention_mask"][0])
    # short slice pair does have padding
    self.assertTrue(pad_token_id in out_p2["input_ids"][1])
    self.assertTrue(0 in out_p2["attention_mask"][1])

def test_add_bos_token_slow(self):
    bos_token = "$$$"
    tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

    s = "This is a simple input"
    s2 = ["This is a simple input 1", "This is a simple input 2"]

    bos_token_id = tokenizer.bos_token_id

    out_s = tokenizer(s)
    out_s2 = tokenizer(s2)

    self.assertEqual(out_s.input_ids[0], bos_token_id)
    self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

    decode_s = tokenizer.decode(out_s.input_ids)
    decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

    self.assertEqual(decode_s.split()[0], bos_token)
    self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

def test_padding_different_model_input_name(self):
    # the tokenizer has no padding token by default, so this common test does not apply here
    pass

def test_special_tokens_mask_input_pairs_and_bos_token(self):
    # TODO: change to self.get_tokenizers() when the fast version is implemented
    tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            sequence_0 = "Encode this."
            sequence_1 = "This one too please."
            encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
            encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
            encoded_sequence_dict = tokenizer.encode_plus(
                sequence_0,
                sequence_1,
                add_special_tokens=True,
                return_special_tokens_mask=True,
            )
            encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
            special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
            self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

            filtered_sequence = [
                (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
            ]
            filtered_sequence = [x for x in filtered_sequence if x is not None]
            self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"

        tokens_ids = tokenizer.encode(text)
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        tokens_ids = tokenizer.encode(text)
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"

        tokens_ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)

        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]

        text = "A photo of a cat"
        tokens_ids = tokenizer.encode(text)
        # We changed the bos token
        self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")

        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        tokens_ids = tokenizer.encode(text)
        self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758])
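A minimal standalone sketch of the save/reload round trip these tests exercise (not part of the test class above; it assumes network access to the Hugging Face Hub and a writable temp directory):

import tempfile

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
ids = tokenizer.encode("A photo of a cat")

with tempfile.TemporaryDirectory() as tmp_dir:
    tokenizer.save_pretrained(tmp_dir)
    reloaded = AutoTokenizer.from_pretrained(tmp_dir)

# a faithful serialize/deserialize round trip reproduces the same ids
assert reloaded.encode("A photo of a cat") == ids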
| 235
| 0
|
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to store the converted VAE.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
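Assuming the script is saved as convert_vae_pt_to_diffusers.py, a typical invocation and follow-up load would look like this (the paths are placeholders):

# python convert_vae_pt_to_diffusers.py --vae_pt_path /path/to/vae.pt --dump_path ./converted_vae
#
# from diffusers import AutoencoderKL
# vae = AutoencoderKL.from_pretrained("./converted_vae")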
| 354
|
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
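A hypothetical end-to-end sketch of how this trainer is meant to be driven; it is written as comments because the model, datasets, and quant_trainer_args are placeholders constructed elsewhere in the example script:

# trainer = QuestionAnsweringTrainer(
#     model=model,
#     args=training_args,
#     train_dataset=train_dataset,
#     eval_dataset=eval_dataset,
#     eval_examples=eval_examples,
#     post_process_function=post_processing_function,
#     quant_trainer_args=quant_trainer_args,
# )
# trainer.calibrate()            # collect quantization-calibration statistics on train_dataset
# metrics = trainer.evaluate()   # evaluation with QA-style post-processing
# trainer.save_onnx("./output")  # export the calibrated, quantized model to ONNX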
| 119
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
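For reference, a minimal sketch of how the processor under test is used in practice; the checkpoint name is illustrative and loading it downloads large model files from the Hub:

# from PIL import Image
# from transformers import InstructBlipProcessor
#
# processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
# inputs = processor(images=Image.open("photo.jpg"), text="What is shown in this image?", return_tensors="pt")
# # `inputs` carries input_ids/attention_mask, qformer_input_ids/qformer_attention_mask, and pixel_values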
| 110
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
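The point of this boilerplate is deferred imports: replacing the module in sys.modules with a _LazyModule means torch-dependent symbols are only imported on first attribute access. A rough sketch of the user-visible behaviour, assuming torch is installed:

# from transformers import SwiftFormerConfig   # cheap: the config needs no torch
# config = SwiftFormerConfig()
# from transformers import SwiftFormerModel    # first access triggers the real modeling import
# model = SwiftFormerModel(config)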
| 41
| 0
|
def solution(n: int = 1000) -> int:
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator

    return len(result)
if __name__ == "__main__":
print(f'''{solution() = }''')
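A quick sanity check, derived by hand from the recurrence (the final value is the published answer to Project Euler problem 57):

# solution(8) == 1    # of 3/2, 7/5, 17/12, 41/29, 99/70, 239/169, 577/408, 1393/985, only 1393/985 qualifies
# solution() == 153   # so the script above prints "solution() = 153"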
| 134
|
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(f'''{solution() = }''')
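Sanity checks (2520 is the textbook smallest number evenly divisible by 1 through 10, and 232792560 is the published Project Euler problem 5 answer for 1 through 20):

# solution(10) == 2520
# solution() == 232792560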
| 134
| 1
|
'''simple docstring'''
def all_unique_chars(input_str: str) -> bool:
    """
    Check whether every character of `input_str` is distinct, using a single
    integer as a bitmap over Unicode code points.

    >>> all_unique_chars("abcd")
    True
    >>> all_unique_chars("abca")
    False
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
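The trick works because Python integers are arbitrary precision, so one int can act as a bitset over the whole Unicode range; a minimal illustration:

# bitmap = 0
# bitmap |= 1 << ord("a")   # turn on bit 97
# (bitmap >> ord("a")) & 1  # -> 1: "a" has been seen
# (bitmap >> ord("b")) & 1  # -> 0: "b" has not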
| 298
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
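Within the accelerate package this parser is wired up as the `test` subcommand, so typical invocations look like:

# accelerate test
# accelerate test --config_file path/to/config.yaml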
| 298
| 1
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
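This is exposed as the `env` subcommand and is what the GitHub issue template asks users to run:

# accelerate env
# accelerate env --config_file path/to/config.yaml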
| 367
|
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Compute the Jaro-Winkler similarity of two strings (1.0 means identical).

    >>> jaro_winkler("martha", "marhta")
    0.9611111111111111
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, char in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if char in _str2[left:right]:
                matched.append(char)
                # blank out the matched character so it cannot be matched twice
                _str2 = f"{_str2[0:_str2.index(char)]} {_str2[_str2.index(char) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world'''))
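As a hand check of the classic example in the doctest above: "martha" vs "marhta" has 6 matching characters, 1 transposition, and a common prefix of length 3, giving jaro = (1 + 1 + 5/6) / 3 ≈ 0.9444 and jaro_winkler = 0.9444 + 0.1 * 3 * (1 - 0.9444) ≈ 0.9611.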
| 131
| 0
|